author     Sergio Ahumada <sergio.ahumada@digia.com>  2013-03-19 09:25:14 +0100
committer  Sergio Ahumada <sergio.ahumada@digia.com>  2013-03-19 09:56:31 +0100
commit     6313e1fe4c27755adde87e62db1c2f9fac534ae4 (patch)
tree       c57bb29f65e02fbfcc07895a8cc2903fff9300ba /src/3rdparty/v8/src
parent     b5a49a260d03249c386f1b63c249089383dd81fa (diff)
parent     cac65e7a222b848a735a974b0aeb43209b0cfa18 (diff)
Merge branch 'dev' into stable
This starts the Qt 5.1 release cycle.

Change-Id: I892bbc73c276842894a720f761ce31ad1b015672
Diffstat (limited to 'src/3rdparty/v8/src')
-rwxr-xr-x  src/3rdparty/v8/src/SConscript | 28
-rw-r--r--  src/3rdparty/v8/src/accessors.cc | 195
-rw-r--r--  src/3rdparty/v8/src/accessors.h | 4
-rw-r--r--  src/3rdparty/v8/src/allocation-inl.h | 4
-rw-r--r--  src/3rdparty/v8/src/allocation.h | 14
-rw-r--r--  src/3rdparty/v8/src/api.cc | 515
-rw-r--r--  src/3rdparty/v8/src/api.h | 143
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm-inl.h | 158
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.cc | 363
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.h | 125
-rw-r--r--  src/3rdparty/v8/src/arm/builtins-arm.cc | 107
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.cc | 625
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.h | 20
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.cc | 127
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.h | 3
-rw-r--r--  src/3rdparty/v8/src/arm/constants-arm.h | 37
-rw-r--r--  src/3rdparty/v8/src/arm/cpu-arm.cc | 7
-rw-r--r--  src/3rdparty/v8/src/arm/debug-arm.cc | 4
-rw-r--r--  src/3rdparty/v8/src/arm/deoptimizer-arm.cc | 162
-rw-r--r--  src/3rdparty/v8/src/arm/disasm-arm.cc | 36
-rw-r--r--  src/3rdparty/v8/src/arm/full-codegen-arm.cc | 425
-rw-r--r--  src/3rdparty/v8/src/arm/ic-arm.cc | 273
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.cc | 501
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.h | 651
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.cc | 1580
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.h | 87
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc | 4
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.cc | 490
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.h | 128
-rw-r--r--  src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc | 201
-rw-r--r--  src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h | 26
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.cc | 284
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.h | 48
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc | 555
-rw-r--r--  src/3rdparty/v8/src/array.js | 168
-rw-r--r--  src/3rdparty/v8/src/assembler.cc | 136
-rw-r--r--  src/3rdparty/v8/src/assembler.h | 85
-rw-r--r--  src/3rdparty/v8/src/ast.cc | 110
-rw-r--r--  src/3rdparty/v8/src/ast.h | 471
-rw-r--r--  src/3rdparty/v8/src/atomicops.h | 6
-rw-r--r--  src/3rdparty/v8/src/atomicops_internals_x86_msvc.h | 6
-rw-r--r--  src/3rdparty/v8/src/bootstrapper.cc | 837
-rw-r--r--  src/3rdparty/v8/src/bootstrapper.h | 4
-rw-r--r--  src/3rdparty/v8/src/builtins.cc | 154
-rw-r--r--  src/3rdparty/v8/src/builtins.h | 41
-rw-r--r--  src/3rdparty/v8/src/checks.h | 8
-rw-r--r--  src/3rdparty/v8/src/code-stubs.cc | 70
-rw-r--r--  src/3rdparty/v8/src/code-stubs.h | 63
-rw-r--r--  src/3rdparty/v8/src/codegen.h | 6
-rw-r--r--  src/3rdparty/v8/src/collection.js | 71
-rw-r--r--  src/3rdparty/v8/src/compilation-cache.cc | 40
-rw-r--r--  src/3rdparty/v8/src/compilation-cache.h | 21
-rw-r--r--  src/3rdparty/v8/src/compiler-intrinsics.h | 24
-rw-r--r--  src/3rdparty/v8/src/compiler.cc | 562
-rw-r--r--  src/3rdparty/v8/src/compiler.h | 198
-rw-r--r--  src/3rdparty/v8/src/contexts.cc | 83
-rw-r--r--  src/3rdparty/v8/src/contexts.h | 105
-rw-r--r--  src/3rdparty/v8/src/conversions-inl.h | 36
-rw-r--r--  src/3rdparty/v8/src/conversions.h | 11
-rw-r--r--  src/3rdparty/v8/src/counters.cc | 26
-rw-r--r--  src/3rdparty/v8/src/counters.h | 60
-rw-r--r--  src/3rdparty/v8/src/cpu-profiler.h | 2
-rw-r--r--  src/3rdparty/v8/src/d8.cc | 710
-rw-r--r--  src/3rdparty/v8/src/d8.h | 25
-rw-r--r--  src/3rdparty/v8/src/date.js | 41
-rw-r--r--  src/3rdparty/v8/src/dateparser-inl.h | 6
-rw-r--r--  src/3rdparty/v8/src/debug-agent.cc | 6
-rw-r--r--  src/3rdparty/v8/src/debug-debugger.js | 38
-rw-r--r--  src/3rdparty/v8/src/debug.cc | 429
-rw-r--r--  src/3rdparty/v8/src/debug.h | 30
-rw-r--r--  src/3rdparty/v8/src/deoptimizer.cc | 615
-rw-r--r--  src/3rdparty/v8/src/deoptimizer.h | 109
-rw-r--r--  src/3rdparty/v8/src/disassembler.cc | 4
-rw-r--r--  src/3rdparty/v8/src/elements-kind.cc | 139
-rw-r--r--  src/3rdparty/v8/src/elements-kind.h | 229
-rw-r--r--  src/3rdparty/v8/src/elements.cc | 636
-rw-r--r--  src/3rdparty/v8/src/elements.h | 16
-rw-r--r--  src/3rdparty/v8/src/execution.cc | 76
-rw-r--r--  src/3rdparty/v8/src/execution.h | 5
-rw-r--r--  src/3rdparty/v8/src/extensions/gc-extension.cc | 6
-rw-r--r--  src/3rdparty/v8/src/extensions/statistics-extension.cc | 153
-rw-r--r--  src/3rdparty/v8/src/extensions/statistics-extension.h | 49
-rw-r--r--  src/3rdparty/v8/src/factory.cc | 226
-rw-r--r--  src/3rdparty/v8/src/factory.h | 56
-rw-r--r--  src/3rdparty/v8/src/flag-definitions.h | 78
-rw-r--r--  src/3rdparty/v8/src/flags.cc | 14
-rw-r--r--  src/3rdparty/v8/src/frames.cc | 66
-rw-r--r--  src/3rdparty/v8/src/frames.h | 9
-rw-r--r--  src/3rdparty/v8/src/full-codegen.cc | 138
-rw-r--r--  src/3rdparty/v8/src/full-codegen.h | 59
-rw-r--r--  src/3rdparty/v8/src/func-name-inferrer.cc | 15
-rw-r--r--  src/3rdparty/v8/src/func-name-inferrer.h | 10
-rw-r--r--  src/3rdparty/v8/src/gdb-jit.cc | 128
-rw-r--r--  src/3rdparty/v8/src/global-handles.cc | 45
-rw-r--r--  src/3rdparty/v8/src/global-handles.h | 15
-rw-r--r--  src/3rdparty/v8/src/globals.h | 34
-rw-r--r--  src/3rdparty/v8/src/handles-inl.h | 30
-rw-r--r--  src/3rdparty/v8/src/handles.cc | 203
-rw-r--r--  src/3rdparty/v8/src/handles.h | 45
-rw-r--r--  src/3rdparty/v8/src/hashmap.h | 100
-rw-r--r--  src/3rdparty/v8/src/heap-inl.h | 94
-rw-r--r--  src/3rdparty/v8/src/heap-profiler.cc | 14
-rw-r--r--  src/3rdparty/v8/src/heap-profiler.h | 6
-rw-r--r--  src/3rdparty/v8/src/heap.cc | 1163
-rw-r--r--  src/3rdparty/v8/src/heap.h | 325
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.cc | 483
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.h | 994
-rw-r--r--  src/3rdparty/v8/src/hydrogen.cc | 2942
-rw-r--r--  src/3rdparty/v8/src/hydrogen.h | 326
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32-inl.h | 48
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.cc | 61
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.h | 24
-rw-r--r--  src/3rdparty/v8/src/ia32/builtins-ia32.cc | 90
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.cc | 272
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.cc | 103
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.h | 4
-rw-r--r--  src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc | 240
-rw-r--r--  src/3rdparty/v8/src/ia32/disasm-ia32.cc | 22
-rw-r--r--  src/3rdparty/v8/src/ia32/frames-ia32.h | 6
-rw-r--r--  src/3rdparty/v8/src/ia32/full-codegen-ia32.cc | 373
-rw-r--r--  src/3rdparty/v8/src/ia32/ic-ia32.cc | 234
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc | 1240
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h | 54
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc | 4
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.cc | 554
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.h | 672
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc | 427
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.h | 65
-rw-r--r--  src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc | 179
-rw-r--r--  src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h | 25
-rw-r--r--  src/3rdparty/v8/src/ia32/simulator-ia32.h | 8
-rw-r--r--  src/3rdparty/v8/src/ia32/stub-cache-ia32.cc | 436
-rw-r--r--  src/3rdparty/v8/src/ic-inl.h | 10
-rw-r--r--  src/3rdparty/v8/src/ic.cc | 330
-rw-r--r--  src/3rdparty/v8/src/ic.h | 32
-rw-r--r--  src/3rdparty/v8/src/incremental-marking-inl.h | 29
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.cc | 393
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.h | 51
-rw-r--r--  src/3rdparty/v8/src/interface.cc | 46
-rw-r--r--  src/3rdparty/v8/src/interface.h | 59
-rw-r--r--  src/3rdparty/v8/src/isolate.cc | 180
-rw-r--r--  src/3rdparty/v8/src/isolate.h | 91
-rw-r--r--  src/3rdparty/v8/src/json-parser.h | 266
-rw-r--r--  src/3rdparty/v8/src/json-stringifier.h | 800
-rw-r--r--  src/3rdparty/v8/src/json.js | 142
-rw-r--r--  src/3rdparty/v8/src/jsregexp.cc | 741
-rw-r--r--  src/3rdparty/v8/src/jsregexp.h | 317
-rw-r--r--  src/3rdparty/v8/src/list-inl.h | 46
-rw-r--r--  src/3rdparty/v8/src/list.h | 62
-rw-r--r--  src/3rdparty/v8/src/lithium-allocator.cc | 89
-rw-r--r--  src/3rdparty/v8/src/lithium-allocator.h | 7
-rw-r--r--  src/3rdparty/v8/src/lithium.cc | 213
-rw-r--r--  src/3rdparty/v8/src/lithium.h | 152
-rw-r--r--  src/3rdparty/v8/src/liveedit-debugger.js | 18
-rw-r--r--  src/3rdparty/v8/src/liveedit.cc | 290
-rw-r--r--  src/3rdparty/v8/src/liveedit.h | 10
-rw-r--r--  src/3rdparty/v8/src/liveobjectlist.cc | 4
-rw-r--r--  src/3rdparty/v8/src/log.cc | 113
-rw-r--r--  src/3rdparty/v8/src/log.h | 25
-rw-r--r--  src/3rdparty/v8/src/mark-compact-inl.h | 26
-rw-r--r--  src/3rdparty/v8/src/mark-compact.cc | 1234
-rw-r--r--  src/3rdparty/v8/src/mark-compact.h | 149
-rw-r--r--  src/3rdparty/v8/src/messages.cc | 19
-rw-r--r--  src/3rdparty/v8/src/messages.js | 440
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips-inl.h | 10
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips.cc | 26
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips.h | 46
-rw-r--r--  src/3rdparty/v8/src/mips/builtins-mips.cc | 72
-rw-r--r--  src/3rdparty/v8/src/mips/code-stubs-mips.cc | 337
-rw-r--r--  src/3rdparty/v8/src/mips/codegen-mips.cc | 4
-rw-r--r--  src/3rdparty/v8/src/mips/deoptimizer-mips.cc | 155
-rw-r--r--  src/3rdparty/v8/src/mips/full-codegen-mips.cc | 372
-rw-r--r--  src/3rdparty/v8/src/mips/ic-mips.cc | 275
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.cc | 1200
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.h | 64
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc | 4
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.cc | 482
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.h | 626
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.cc | 280
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.h | 61
-rw-r--r--  src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc | 183
-rw-r--r--  src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h | 23
-rw-r--r--  src/3rdparty/v8/src/mips/simulator-mips.cc | 18
-rw-r--r--  src/3rdparty/v8/src/mips/simulator-mips.h | 10
-rw-r--r--  src/3rdparty/v8/src/mips/stub-cache-mips.cc | 451
-rw-r--r--  src/3rdparty/v8/src/mirror-debugger.js | 162
-rw-r--r--  src/3rdparty/v8/src/misc-intrinsics.h | 2
-rw-r--r--  src/3rdparty/v8/src/mksnapshot.cc | 106
-rw-r--r--  src/3rdparty/v8/src/object-observe.js | 240
-rw-r--r--  src/3rdparty/v8/src/objects-debug.cc | 346
-rw-r--r--  src/3rdparty/v8/src/objects-inl.h | 1408
-rw-r--r--  src/3rdparty/v8/src/objects-printer.cc | 538
-rw-r--r--  src/3rdparty/v8/src/objects-visiting-inl.h | 535
-rw-r--r--  src/3rdparty/v8/src/objects-visiting.cc | 2
-rw-r--r--  src/3rdparty/v8/src/objects-visiting.h | 219
-rw-r--r--  src/3rdparty/v8/src/objects.cc | 5007
-rw-r--r--  src/3rdparty/v8/src/objects.h | 1747
-rw-r--r--  src/3rdparty/v8/src/optimizing-compiler-thread.cc | 132
-rw-r--r--  src/3rdparty/v8/src/optimizing-compiler-thread.h | 101
-rw-r--r--  src/3rdparty/v8/src/parser.cc | 809
-rw-r--r--  src/3rdparty/v8/src/parser.h | 77
-rw-r--r--  src/3rdparty/v8/src/platform-cygwin.cc | 6
-rw-r--r--  src/3rdparty/v8/src/platform-freebsd.cc | 6
-rw-r--r--  src/3rdparty/v8/src/platform-linux.cc | 106
-rw-r--r--  src/3rdparty/v8/src/platform-macos.cc | 27
-rw-r--r--  src/3rdparty/v8/src/platform-nullos.cc | 13
-rw-r--r--  src/3rdparty/v8/src/platform-openbsd.cc | 11
-rw-r--r--  src/3rdparty/v8/src/platform-posix.cc | 5
-rw-r--r--  src/3rdparty/v8/src/platform-qnx.cc | 19
-rw-r--r--  src/3rdparty/v8/src/platform-solaris.cc | 14
-rw-r--r--  src/3rdparty/v8/src/platform-tls-win32.h | 2
-rw-r--r--  src/3rdparty/v8/src/platform-win32.cc | 125
-rw-r--r--  src/3rdparty/v8/src/platform.h | 32
-rw-r--r--  src/3rdparty/v8/src/preparser.cc | 13
-rw-r--r--  src/3rdparty/v8/src/profile-generator-inl.h | 21
-rw-r--r--  src/3rdparty/v8/src/profile-generator.cc | 507
-rw-r--r--  src/3rdparty/v8/src/profile-generator.h | 63
-rw-r--r--  src/3rdparty/v8/src/property-details.h | 73
-rw-r--r--  src/3rdparty/v8/src/property.cc | 107
-rw-r--r--  src/3rdparty/v8/src/property.h | 179
-rw-r--r--  src/3rdparty/v8/src/proxy.js | 9
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc | 20
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h | 7
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc | 23
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler-tracer.h | 3
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler.cc | 21
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler.h | 27
-rw-r--r--  src/3rdparty/v8/src/regexp-stack.cc | 1
-rw-r--r--  src/3rdparty/v8/src/regexp.js | 15
-rw-r--r--  src/3rdparty/v8/src/rewriter.cc | 14
-rw-r--r--  src/3rdparty/v8/src/runtime-profiler.cc | 62
-rw-r--r--  src/3rdparty/v8/src/runtime.cc | 2403
-rw-r--r--  src/3rdparty/v8/src/runtime.h | 50
-rw-r--r--  src/3rdparty/v8/src/safepoint-table.cc | 19
-rw-r--r--  src/3rdparty/v8/src/safepoint-table.h | 19
-rwxr-xr-x  src/3rdparty/v8/src/scanner.cc | 6
-rw-r--r--  src/3rdparty/v8/src/scopeinfo.cc | 16
-rw-r--r--  src/3rdparty/v8/src/scopes.cc | 399
-rw-r--r--  src/3rdparty/v8/src/scopes.h | 66
-rw-r--r--  src/3rdparty/v8/src/serialize.cc | 716
-rw-r--r--  src/3rdparty/v8/src/serialize.h | 233
-rw-r--r--  src/3rdparty/v8/src/small-pointer-list.h | 22
-rw-r--r--  src/3rdparty/v8/src/smart-pointers.h (renamed from src/3rdparty/v8/src/smart-array-pointer.h) | 67
-rw-r--r--  src/3rdparty/v8/src/snapshot-common.cc | 79
-rw-r--r--  src/3rdparty/v8/src/snapshot-empty.cc | 8
-rw-r--r--  src/3rdparty/v8/src/snapshot.h | 11
-rw-r--r--  src/3rdparty/v8/src/spaces-inl.h | 17
-rw-r--r--  src/3rdparty/v8/src/spaces.cc | 194
-rw-r--r--  src/3rdparty/v8/src/spaces.h | 111
-rw-r--r--  src/3rdparty/v8/src/splay-tree-inl.h | 15
-rw-r--r--  src/3rdparty/v8/src/splay-tree.h | 34
-rw-r--r--  src/3rdparty/v8/src/store-buffer.cc | 6
-rw-r--r--  src/3rdparty/v8/src/store-buffer.h | 2
-rw-r--r--  src/3rdparty/v8/src/string-stream.cc | 17
-rw-r--r--  src/3rdparty/v8/src/stub-cache.cc | 175
-rw-r--r--  src/3rdparty/v8/src/stub-cache.h | 81
-rw-r--r--  src/3rdparty/v8/src/token.h | 1
-rw-r--r--  src/3rdparty/v8/src/transitions-inl.h | 220
-rw-r--r--  src/3rdparty/v8/src/transitions.cc | 160
-rw-r--r--  src/3rdparty/v8/src/transitions.h | 207
-rw-r--r--  src/3rdparty/v8/src/type-info.cc | 173
-rw-r--r--  src/3rdparty/v8/src/type-info.h | 32
-rw-r--r--  src/3rdparty/v8/src/unicode-inl.h | 2
-rw-r--r--  src/3rdparty/v8/src/unicode.h | 6
-rw-r--r--  src/3rdparty/v8/src/utils.h | 62
-rw-r--r--  src/3rdparty/v8/src/v8-counters.cc | 58
-rw-r--r--  src/3rdparty/v8/src/v8-counters.h | 144
-rw-r--r--  src/3rdparty/v8/src/v8.cc | 26
-rw-r--r--  src/3rdparty/v8/src/v8.h | 6
-rw-r--r--  src/3rdparty/v8/src/v8globals.h | 70
-rw-r--r--  src/3rdparty/v8/src/v8natives.js | 20
-rw-r--r--  src/3rdparty/v8/src/v8threads.cc | 23
-rw-r--r--  src/3rdparty/v8/src/v8threads.h | 5
-rw-r--r--  src/3rdparty/v8/src/v8utils.cc | 17
-rw-r--r--  src/3rdparty/v8/src/v8utils.h | 5
-rw-r--r--  src/3rdparty/v8/src/variables.cc | 7
-rw-r--r--  src/3rdparty/v8/src/variables.h | 15
-rw-r--r--  src/3rdparty/v8/src/version.cc | 4
-rw-r--r--  src/3rdparty/v8/src/vm-state-inl.h | 2
-rw-r--r--  src/3rdparty/v8/src/win32-headers.h | 24
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64-inl.h | 45
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.cc | 25
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.h | 27
-rw-r--r--  src/3rdparty/v8/src/x64/builtins-x64.cc | 96
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc | 335
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.cc | 91
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  src/3rdparty/v8/src/x64/deoptimizer-x64.cc | 158
-rw-r--r--  src/3rdparty/v8/src/x64/disasm-x64.cc | 3
-rw-r--r--  src/3rdparty/v8/src/x64/full-codegen-x64.cc | 381
-rw-r--r--  src/3rdparty/v8/src/x64/ic-x64.cc | 240
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.cc | 1263
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.h | 52
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc | 4
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.cc | 511
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.h | 605
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.cc | 494
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.h | 76
-rw-r--r--  src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc | 201
-rw-r--r--  src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h | 30
-rw-r--r--  src/3rdparty/v8/src/x64/simulator-x64.h | 8
-rw-r--r--  src/3rdparty/v8/src/x64/stub-cache-x64.cc | 437
-rw-r--r--  src/3rdparty/v8/src/zone-inl.h | 36
-rw-r--r--  src/3rdparty/v8/src/zone.cc | 10
-rw-r--r--  src/3rdparty/v8/src/zone.h | 80
305 files changed, 40374 insertions, 22351 deletions
diff --git a/src/3rdparty/v8/src/SConscript b/src/3rdparty/v8/src/SConscript
index 0d0b535..16bfb55 100755
--- a/src/3rdparty/v8/src/SConscript
+++ b/src/3rdparty/v8/src/SConscript
@@ -43,8 +43,8 @@ SOURCES = {
assembler.cc
ast.cc
atomicops_internals_x86_gcc.cc
- bignum.cc
bignum-dtoa.cc
+ bignum.cc
bootstrapper.cc
builtins.cc
cached-powers.cc
@@ -67,26 +67,30 @@ SOURCES = {
disassembler.cc
diy-fp.cc
dtoa.cc
+ elements-kind.cc
elements.cc
execution.cc
+ extensions/externalize-string-extension.cc
+ extensions/gc-extension.cc
+ extensions/statistics-extension.cc
factory.cc
+ fast-dtoa.cc
+ fixed-dtoa.cc
flags.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
gdb-jit.cc
global-handles.cc
- fast-dtoa.cc
- fixed-dtoa.cc
handles.cc
heap-profiler.cc
heap.cc
- hydrogen.cc
hydrogen-instructions.cc
+ hydrogen.cc
ic.cc
incremental-marking.cc
- interface.cc
inspector.cc
+ interface.cc
interpreter-irregexp.cc
isolate.cc
jsregexp.cc
@@ -98,34 +102,37 @@ SOURCES = {
log.cc
mark-compact.cc
messages.cc
- objects.cc
objects-printer.cc
objects-visiting.cc
+ objects.cc
once.cc
+ optimizing-compiler-thread.cc
parser.cc
- preparser.cc
preparse-data.cc
+ preparser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
rewriter.cc
- runtime.cc
runtime-profiler.cc
+ runtime.cc
safepoint-table.cc
- scanner.cc
scanner-character-streams.cc
+ scanner.cc
scopeinfo.cc
scopes.cc
serialize.cc
snapshot-common.cc
spaces.cc
+ store-buffer.cc
string-search.cc
string-stream.cc
strtod.cc
stub-cache.cc
token.cc
+ transitions.cc
type-info.cc
unicode.cc
utils.cc
@@ -136,10 +143,7 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
- store-buffer.cc
zone.cc
- extensions/gc-extension.cc
- extensions/externalize-string-extension.cc
"""),
'arch:arm': Split("""
arm/builtins-arm.cc
diff --git a/src/3rdparty/v8/src/accessors.cc b/src/3rdparty/v8/src/accessors.cc
index 8048738..c2f245c 100644
--- a/src/3rdparty/v8/src/accessors.cc
+++ b/src/3rdparty/v8/src/accessors.cc
@@ -42,15 +42,11 @@ namespace internal {
template <class C>
-static C* FindInPrototypeChain(Object* obj, bool* found_it) {
- ASSERT(!*found_it);
- Heap* heap = HEAP;
- while (!Is<C>(obj)) {
- if (obj == heap->null_value()) return NULL;
- obj = obj->GetPrototype();
+static C* FindInstanceOf(Object* obj) {
+ for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) {
+ if (Is<C>(cur)) return C::cast(cur);
}
- *found_it = true;
- return C::cast(obj);
+ return NULL;
}
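
The rewrite above folds the old bool* out-parameter protocol into a single NULL-returning walk. A self-contained analogue of the pattern, with stand-in types that are illustrative only (not V8 internals):

    #include <cstddef>
    struct Node { Node* prototype; bool is_wanted_kind; };

    // Walk the chain; return the first match or NULL. The caller tests the
    // pointer instead of threading a found_it flag through every call site.
    static Node* FindOfKind(Node* obj) {
      for (Node* cur = obj; cur != NULL; cur = cur->prototype) {
        if (cur->is_wanted_kind) return cur;
      }
      return NULL;
    }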
@@ -81,10 +77,8 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
// Traverse the prototype chain until we reach an array.
- bool found_it = false;
- JSArray* holder = FindInPrototypeChain<JSArray>(object, &found_it);
- if (!found_it) return Smi::FromInt(0);
- return holder->length();
+ JSArray* holder = FindInstanceOf<JSArray>(object);
+ return holder == NULL ? Smi::FromInt(0) : holder->length();
}
@@ -92,15 +86,56 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->global_context()->number_function()->
+ ASSERT(Isolate::Current()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = Isolate::Current()->context()->global_context()->
+ Map* number_map = Isolate::Current()->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
+static MaybeObject* ArraySetLengthObserved(Isolate* isolate,
+ Handle<JSArray> array,
+ Handle<Object> new_length_handle) {
+ List<Handle<String> > indices;
+ List<Handle<Object> > old_values;
+ Handle<Object> old_length_handle(array->length(), isolate);
+ uint32_t old_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ uint32_t new_length = 0;
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
+ // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
+ // Find another way to iterate over arrays with dictionary elements.
+ for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
+ PropertyAttributes attributes = array->GetLocalElementAttribute(i);
+ if (attributes == ABSENT) continue;
+ // A non-configurable property will cause the truncation operation to
+ // stop at this index.
+ if (attributes == DONT_DELETE) break;
+ // TODO(adamk): Don't fetch the old value if it's an accessor.
+ old_values.Add(Object::GetElement(array, i));
+ indices.Add(isolate->factory()->Uint32ToString(i));
+ }
+
+ MaybeObject* result = array->SetElementsLength(*new_length_handle);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ CHECK(array->length()->ToArrayIndex(&new_length));
+ if (old_length != new_length) {
+ for (int i = 0; i < indices.length(); ++i) {
+ JSObject::EnqueueChangeRecord(
+ array, "deleted", indices[i], old_values[i]);
+ }
+ JSObject::EnqueueChangeRecord(
+ array, "updated", isolate->factory()->length_symbol(),
+ old_length_handle);
+ }
+ return *hresult;
+}
+
+
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
@@ -118,7 +153,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
HandleScope scope(isolate);
// Protect raw pointers.
- Handle<JSObject> object_handle(object, isolate);
+ Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
@@ -128,7 +163,11 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
+ if (FLAG_harmony_observation && array_handle->map()->is_observed()) {
+ return ArraySetLengthObserved(isolate, array_handle, uint32_v);
+ } else {
+ return array_handle->SetElementsLength(*uint32_v);
+ }
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -448,15 +487,12 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Heap* heap = Isolate::Current()->heap();
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return heap->undefined_value();
+ JSFunction* function = FindInstanceOf<JSFunction>(object);
+ if (function == NULL) return heap->undefined_value();
while (!function->should_have_prototype()) {
- found_it = false;
- function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
- &found_it);
+ function = FindInstanceOf<JSFunction>(function->GetPrototype());
// There has to be one because we hit the getter.
- ASSERT(found_it);
+ ASSERT(function != NULL);
}
if (!function->has_prototype()) {
@@ -477,9 +513,8 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value,
void*) {
Heap* heap = object->GetHeap();
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return heap->undefined_value();
+ JSFunction* function = FindInstanceOf<JSFunction>(object);
+ if (function == NULL) return heap->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
@@ -509,22 +544,20 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- bool found_it = false;
- JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return Smi::FromInt(0);
+ JSFunction* function = FindInstanceOf<JSFunction>(object);
+ if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
- if (!function->shared()->is_compiled()) {
- // If the function isn't compiled yet, the length is not computed
- // correctly yet. Compile it now and return the right length.
- HandleScope scope;
- Handle<JSFunction> handle(function);
- if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- return Smi::FromInt(handle->shared()->length());
- } else {
+ if (function->shared()->is_compiled()) {
return Smi::FromInt(function->shared()->length());
}
+ // If the function isn't compiled yet, the length is not computed correctly
+ // yet. Compile it now and return the right length.
+ HandleScope scope;
+ Handle<JSFunction> handle(function);
+ if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
+ return Smi::FromInt(handle->shared()->length());
+ }
+ return Failure::Exception();
}
@@ -541,10 +574,8 @@ const AccessorDescriptor Accessors::FunctionLength = {
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return HEAP->undefined_value();
- return holder->shared()->name();
+ JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ return holder == NULL ? HEAP->undefined_value() : holder->shared()->name();
}
@@ -589,9 +620,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return isolate->heap()->undefined_value();
+ JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ if (holder == NULL) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
if (function->shared()->native()) return isolate->heap()->null_value();
@@ -727,9 +757,8 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
AssertNoAllocation no_alloc;
- bool found_it = false;
- JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return isolate->heap()->undefined_value();
+ JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
@@ -755,6 +784,9 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
caller = potential_caller;
potential_caller = it.next();
}
+ if (!caller->shared()->native() && potential_caller != NULL) {
+ caller = potential_caller;
+ }
// If caller is bound, return null. This is compatible with JSC, and
// allows us to make bound functions use the strict function map
// and its associated throwing caller and arguments.
@@ -802,4 +834,69 @@ const AccessorDescriptor Accessors::ObjectPrototype = {
0
};
+
+//
+// Accessors::MakeModuleExport
+//
+
+static v8::Handle<v8::Value> ModuleGetExport(
+ v8::Local<v8::String> property,
+ const v8::AccessorInfo& info) {
+ JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+ Context* context = Context::cast(instance->context());
+ ASSERT(context->IsModuleContext());
+ int slot = info.Data()->Int32Value();
+ Object* value = context->get(slot);
+ if (value->IsTheHole()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+ Isolate* isolate = instance->GetIsolate();
+ isolate->ScheduleThrow(
+ *isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1)));
+ return v8::Handle<v8::Value>();
+ }
+ return v8::Utils::ToLocal(Handle<Object>(value));
+}
+
+
+static void ModuleSetExport(
+ v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::AccessorInfo& info) {
+ JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+ Context* context = Context::cast(instance->context());
+ ASSERT(context->IsModuleContext());
+ int slot = info.Data()->Int32Value();
+ Object* old_value = context->get(slot);
+ if (old_value->IsTheHole()) {
+ Handle<String> name = v8::Utils::OpenHandle(*property);
+ Isolate* isolate = instance->GetIsolate();
+ isolate->ScheduleThrow(
+ *isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1)));
+ return;
+ }
+ context->set(slot, *v8::Utils::OpenHandle(*value));
+}
+
+
+Handle<AccessorInfo> Accessors::MakeModuleExport(
+ Handle<String> name,
+ int index,
+ PropertyAttributes attributes) {
+ Factory* factory = name->GetIsolate()->factory();
+ Handle<AccessorInfo> info = factory->NewAccessorInfo();
+ info->set_property_attributes(attributes);
+ info->set_all_can_read(true);
+ info->set_all_can_write(true);
+ info->set_name(*name);
+ info->set_data(Smi::FromInt(index));
+ Handle<Object> getter = v8::FromCData(&ModuleGetExport);
+ Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+ info->set_getter(*getter);
+ if (!(attributes & ReadOnly)) info->set_setter(*setter);
+ return info;
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/accessors.h b/src/3rdparty/v8/src/accessors.h
index 36b9a99..250f742 100644
--- a/src/3rdparty/v8/src/accessors.h
+++ b/src/3rdparty/v8/src/accessors.h
@@ -85,6 +85,10 @@ class Accessors : public AllStatic {
void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
+ // Accessor infos.
+ static Handle<AccessorInfo> MakeModuleExport(
+ Handle<String> name, int index, PropertyAttributes attributes);
+
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetLength(Object* object, void*);
diff --git a/src/3rdparty/v8/src/allocation-inl.h b/src/3rdparty/v8/src/allocation-inl.h
index 04a3fe6..d32db4b 100644
--- a/src/3rdparty/v8/src/allocation-inl.h
+++ b/src/3rdparty/v8/src/allocation-inl.h
@@ -34,12 +34,12 @@ namespace v8 {
namespace internal {
-void* PreallocatedStorage::New(size_t size) {
+void* PreallocatedStorageAllocationPolicy::New(size_t size) {
return Isolate::Current()->PreallocatedStorageNew(size);
}
-void PreallocatedStorage::Delete(void* p) {
+void PreallocatedStorageAllocationPolicy::Delete(void* p) {
return Isolate::Current()->PreallocatedStorageDelete(p);
}
diff --git a/src/3rdparty/v8/src/allocation.h b/src/3rdparty/v8/src/allocation.h
index 31067dd..45bde4c 100644
--- a/src/3rdparty/v8/src/allocation.h
+++ b/src/3rdparty/v8/src/allocation.h
@@ -104,7 +104,7 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
- INLINE(static void* New(size_t size)) { return Malloced::New(size); }
+ INLINE(void* New(size_t size)) { return Malloced::New(size); }
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
};
@@ -117,12 +117,6 @@ class PreallocatedStorage {
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
- // TODO(isolates): Get rid of these-- we'll have to change the allocator
- // interface to include a pointer to an isolate to do this
- // efficiently.
- static inline void* New(size_t size);
- static inline void Delete(void* p);
-
private:
size_t size_;
PreallocatedStorage* previous_;
@@ -137,6 +131,12 @@ class PreallocatedStorage {
};
+struct PreallocatedStorageAllocationPolicy {
+ INLINE(void* New(size_t size));
+ INLINE(static void Delete(void* ptr));
+};
+
+
} } // namespace v8::internal
#endif // V8_ALLOCATION_H_
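
Both policies now expose New() as an instance method (Delete stays static), so containers can hold a possibly stateful policy value, e.g. one bound to an isolate's preallocated storage. A minimal sketch of a container using such a policy; PolicyVector and its members are illustrative names, not V8's actual List:

    template <typename T, class AllocationPolicy>
    class PolicyVector {
     public:
      explicit PolicyVector(int capacity,
                            AllocationPolicy policy = AllocationPolicy())
          : policy_(policy),
            data_(static_cast<T*>(policy_.New(capacity * sizeof(T)))) {}
      ~PolicyVector() { AllocationPolicy::Delete(data_); }
     private:
      AllocationPolicy policy_;  // held by value; instance New() allows state
      T* data_;
    };

    // Usage: PolicyVector<int, FreeStoreAllocationPolicy> v(16);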
diff --git a/src/3rdparty/v8/src/api.cc b/src/3rdparty/v8/src/api.cc
index 70d0a8a..cbb3a04 100644
--- a/src/3rdparty/v8/src/api.cc
+++ b/src/3rdparty/v8/src/api.cc
@@ -33,6 +33,7 @@
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
@@ -540,7 +541,9 @@ Extension::Extension(const char* name,
source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
- auto_enable_(false) { }
+ auto_enable_(false) {
+ CHECK(source != NULL || source_length_ == 0);
+}
v8::Handle<Primitive> Undefined() {
@@ -640,11 +643,48 @@ void V8::ClearWeak(i::Object** obj) {
void V8::MarkIndependent(i::Object** object) {
i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MakeIndependent");
+ LOG_API(isolate, "MarkIndependent");
isolate->global_handles()->MarkIndependent(object);
}
+void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "MarkIndependent");
+ isolate->global_handles()->MarkIndependent(object);
+}
+
+
+void V8::MarkPartiallyDependent(i::Object** object) {
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "MarkPartiallyDependent");
+ isolate->global_handles()->MarkPartiallyDependent(object);
+}
+
+
+void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "MarkPartiallyDependent");
+ isolate->global_handles()->MarkPartiallyDependent(object);
+}
+
+
+bool V8::IsGlobalIndependent(i::Object** obj) {
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalIndependent");
+ if (!isolate->IsInitialized()) return false;
+ return i::GlobalHandles::IsIndependent(obj);
+}
+
+
+bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "IsGlobalIndependent");
+ if (!isolate->IsInitialized()) return false;
+ return i::GlobalHandles::IsIndependent(obj);
+}
+
+
bool V8::IsGlobalNearDeath(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalNearDeath");
@@ -668,6 +708,14 @@ void V8::DisposeGlobal(i::Object** obj) {
isolate->global_handles()->Destroy(obj);
}
+
+void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
+ ASSERT(isolate == i::Isolate::Current());
+ LOG_API(isolate, "DisposeGlobal");
+ if (!isolate->IsInitialized()) return;
+ isolate->global_handles()->Destroy(obj);
+}
+
// --- H a n d l e s ---
@@ -762,13 +810,13 @@ void Context::Exit() {
}
-void Context::SetData(v8::Handle<String> data) {
+void Context::SetData(v8::Handle<Value> data) {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
+ ASSERT(env->IsNativeContext());
+ if (env->IsNativeContext()) {
env->set_data(*raw_data);
}
}
@@ -778,16 +826,13 @@ v8::Local<v8::Value> Context::GetData() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
- return v8::Local<Value>();
+ return Local<Value>();
}
- i::Object* raw_result = NULL;
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- raw_result = env->data();
- } else {
+ ASSERT(env->IsNativeContext());
+ if (!env->IsNativeContext()) {
return Local<Value>();
}
- i::Handle<i::Object> result(raw_result, isolate);
+ i::Handle<i::Object> result(env->data(), isolate);
return Utils::ToLocal(result);
}
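
A hedged usage sketch of the widened signature (SetData previously took a Handle<String>; it now accepts any Value):

    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope scope(context);
    context->SetData(v8::Integer::New(42));  // non-String data is now allowed
    v8::Local<v8::Value> data = context->GetData();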
@@ -990,6 +1035,12 @@ Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
}
+Local<AccessorSignature> AccessorSignature::New(
+ Handle<FunctionTemplate> receiver) {
+ return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
+}
+
+
Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Handle<FunctionTemplate> types[1] = { type };
return TypeSwitch::New(1, types);
@@ -1057,9 +1108,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
- ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
@@ -1069,6 +1120,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+ if (!signature.IsEmpty()) {
+ obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
+ }
return obj;
}
@@ -1079,7 +1133,8 @@ void FunctionTemplate::AddInstancePropertyAccessor(
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
- v8::PropertyAttribute attributes) {
+ v8::PropertyAttribute attributes,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
@@ -1088,9 +1143,9 @@ void FunctionTemplate::AddInstancePropertyAccessor(
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
+ i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
+ settings, attributes,
+ signature);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
if (list->IsUndefined()) {
list = NeanderArray().value();
@@ -1277,7 +1332,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessorSetter setter,
v8::Handle<Value> data,
AccessControl settings,
- PropertyAttribute attribute) {
+ PropertyAttribute attribute,
+ v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
ENTER_V8(isolate);
@@ -1291,7 +1347,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
setter,
data,
settings,
- attribute);
+ attribute,
+ signature);
}
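
A hedged usage sketch of the new AccessorSignature parameter; the template and getter names are illustrative. The signature restricts the accessor to receivers created from the given FunctionTemplate:

    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String>,
                                         const v8::AccessorInfo&) {
      return v8::Integer::New(7);
    }

    v8::HandleScope scope;
    v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
    v8::Handle<v8::AccessorSignature> sig = v8::AccessorSignature::New(templ);
    templ->InstanceTemplate()->SetAccessor(
        v8::String::New("x"), XGetter, 0, v8::Handle<v8::Value>(),
        v8::DEFAULT, v8::None, sig);  // receiver must come from templ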
@@ -1593,9 +1650,10 @@ Local<Script> Script::New(v8::Handle<String> source,
name_obj,
line_offset,
column_offset,
+ isolate->global_context(),
NULL,
pre_data_impl,
- Utils::OpenHandle(*script_data),
+ Utils::OpenHandle(*script_data, true),
i::NOT_NATIVES_CODE,
compile_flags);
has_pending_exception = result.is_null();
@@ -1670,7 +1728,7 @@ Local<Value> Script::Run(Handle<Object> qml) {
fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml);
+ i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml, true);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
i::Handle<i::Object> result =
@@ -3102,6 +3160,17 @@ Local<String> v8::Object::ObjectProtoToString() {
}
+Local<Value> v8::Object::GetConstructor() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetConstructor()",
+ return Local<v8::Function>());
+ ENTER_V8(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> constructor(self->GetConstructor());
+ return Utils::ToLocal(constructor);
+}
+
+
Local<String> v8::Object::GetConstructorName() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
@@ -3163,9 +3232,10 @@ bool Object::SetAccessor(Handle<String> name,
ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
- getter, setter, data,
- settings, attributes);
+ v8::Handle<AccessorSignature> signature;
+ i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
+ settings, attributes,
+ signature);
bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
if (result.is_null() || result->IsUndefined()) return false;
@@ -3293,7 +3363,7 @@ void v8::Object::TurnOnAccessCheck() {
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
- isolate->factory()->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
+ isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
@@ -3328,7 +3398,7 @@ static i::Context* GetCreationContext(i::JSObject* object) {
} else {
function = i::JSFunction::cast(constructor);
}
- return function->context()->global_context();
+ return function->context()->native_context();
}
@@ -3357,13 +3427,15 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
+ if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
+ i::JSObject::SetHiddenProperty(self, key_symbol, value_obj);
return *result == *self;
}
@@ -3375,7 +3447,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3388,7 +3461,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- self->DeleteHiddenProperty(*key_obj);
+ i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ self->DeleteHiddenProperty(*key_symbol);
return true;
}
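
The embedder-visible effect of the hidden-value changes above, as a hedged sketch (obj is an assumed v8::Handle<v8::Object>): keys are now symbolized internally, and passing an empty value handle deletes the entry:

    v8::Handle<v8::String> key = v8::String::New("embedder-key");
    obj->SetHiddenValue(key, v8::Integer::New(1));
    v8::Local<v8::Value> v = obj->GetHiddenValue(key);
    obj->SetHiddenValue(key, v8::Handle<v8::Value>());  // deletes the entry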
@@ -3456,7 +3530,7 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
+ if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
@@ -3512,7 +3586,7 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
+ if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
return;
@@ -3745,8 +3819,9 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
v8::ScriptOrigin origin(
- Utils::ToLocal(i::Handle<i::Object>(script->name())),
+ Utils::ToLocal(scriptName),
v8::Integer::New(script->line_offset()->value()),
v8::Integer::New(script->column_offset()->value()));
return origin;
@@ -3929,26 +4004,6 @@ uint32_t String::ComputeHash(char *string, int length) {
}
-uint16_t String::GetCharacter(int index) {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- return str->Get(index);
-}
-
-
-bool String::Equals(uint16_t *string, int length) {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
- return str->SlowEqualsExternal(string, length);
-}
-
-
-bool String::Equals(char *string, int length) {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
- return str->SlowEqualsExternal(string, length);
-}
-
-
int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
@@ -3958,6 +4013,9 @@ int String::WriteUtf8(char* buffer,
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (options & HINT_MANY_WRITES_EXPECTED) {
+ FlattenString(str); // Flatten the string for efficiency.
+ }
int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
@@ -4014,11 +4072,7 @@ int String::WriteUtf8(char* buffer,
// Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- FlattenString(str);
- }
+
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@@ -4060,8 +4114,9 @@ int String::WriteUtf8(char* buffer,
c,
unibrow::Utf16::kNoPreviousCharacter);
if (pos + written <= capacity) {
- for (int j = 0; j < written; j++)
+ for (int j = 0; j < written; j++) {
buffer[pos + j] = intermediate[j];
+ }
pos += written;
nchars++;
} else {
@@ -4074,8 +4129,9 @@ int String::WriteUtf8(char* buffer,
}
if (nchars_ref != NULL) *nchars_ref = nchars;
if (!(options & NO_NULL_TERMINATION) &&
- (i == len && (capacity == -1 || pos < capacity)))
+ (i == len && (capacity == -1 || pos < capacity))) {
buffer[pos++] = '\0';
+ }
return pos;
}
@@ -4088,28 +4144,45 @@ int String::WriteAscii(char* buffer,
if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
LOG_API(isolate, "String::WriteAscii");
ENTER_V8(isolate);
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str); // Flatten the string for efficiency.
}
+
+ if (str->IsAsciiRepresentation()) {
+ // WriteToFlat is faster than using the StringInputBuffer.
+ if (length == -1) length = str->length() + 1;
+ int len = i::Min(length, str->length() - start);
+ i::String::WriteToFlat(*str, buffer, start, start + len);
+ if (!(options & PRESERVE_ASCII_NULL)) {
+ for (int i = 0; i < len; i++) {
+ if (buffer[i] == '\0') buffer[i] = ' ';
+ }
+ }
+ if (!(options & NO_NULL_TERMINATION) && length > len) {
+ buffer[len] = '\0';
+ }
+ return len;
+ }
+
+ i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
int end = length;
- if ( (length == -1) || (length > str->length() - start) )
+ if ((length == -1) || (length > str->length() - start)) {
end = str->length() - start;
+ }
if (end < 0) return 0;
write_input_buffer.Reset(start, *str);
int i;
for (i = 0; i < end; i++) {
char c = static_cast<char>(write_input_buffer.GetNext());
- if (c == '\0') c = ' ';
+ if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
buffer[i] = c;
}
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length))
+ if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
buffer[i] = '\0';
+ }
return i;
}
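
A hedged sketch of the PRESERVE_ASCII_NULL option handled above (str is an assumed v8::Handle<v8::String>): embedded '\0' characters are copied through instead of being rewritten to spaces:

    char buf[64];
    int written = str->WriteAscii(buf, 0, 64,
                                  v8::String::PRESERVE_ASCII_NULL);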
@@ -4128,7 +4201,7 @@ int String::Write(uint16_t* buffer,
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
- str->TryFlatten();
+ FlattenString(str);
}
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
@@ -4176,6 +4249,29 @@ void v8::String::VerifyExternalStringResource(
CHECK_EQ(expected, value);
}
+void v8::String::VerifyExternalStringResourceBase(
+ v8::String::ExternalStringResourceBase* value, Encoding encoding) const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ const v8::String::ExternalStringResourceBase* expected;
+ Encoding expectedEncoding;
+ if (i::StringShape(*str).IsExternalAscii()) {
+ const void* resource =
+ i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+ expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
+ expectedEncoding = ASCII_ENCODING;
+ } else if (i::StringShape(*str).IsExternalTwoByte()) {
+ const void* resource =
+ i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+ expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
+ expectedEncoding = TWO_BYTE_ENCODING;
+ } else {
+ expected = NULL;
+ expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING
+ : TWO_BYTE_ENCODING;
+ }
+ CHECK_EQ(expected, value);
+ CHECK_EQ(expectedEncoding, encoding);
+}
const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
@@ -4314,8 +4410,9 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(
reinterpret_cast<i::Address>(value), i::TENURED);
- if (!foreign.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ if (!foreign.is_null()) {
+ Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ }
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -4372,6 +4469,20 @@ void v8::V8::SetReturnAddressLocationResolver(
}
+bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
+ return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
+}
+
+
+void v8::V8::SetJitCodeEventHandler(
+ JitCodeEventOptions options, JitCodeEventHandler event_handler) {
+ i::Isolate* isolate = i::Isolate::Current();
+ // Ensure that logging is initialized for our isolate.
+ isolate->InitializeLoggingAndCounters();
+ isolate->logger()->SetCodeEventHandler(options, event_handler);
+}
+
+
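
A hedged registration sketch for the handler added above, assuming <cstdio> and the v8 3.x JitCodeEvent layout; OnJitEvent is an illustrative name:

    static void OnJitEvent(const v8::JitCodeEvent* event) {
      if (event->type == v8::JitCodeEvent::CODE_ADDED) {
        // name.str is not null-terminated; the length is explicit.
        fwrite(event->name.str, 1, event->name.len, stderr);
        fputc('\n', stderr);
      }
    }

    // kJitCodeEventEnumExisting would additionally replay code compiled
    // before registration; kJitCodeEventDefault reports only new code.
    v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, OnJitEvent);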
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@@ -4386,6 +4497,7 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
+ total_physical_size_(0),
used_heap_size_(0),
heap_size_limit_(0) { }
@@ -4395,6 +4507,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
// Isolate is uninitialized thus heap is not configured yet.
heap_statistics->set_total_heap_size(0);
heap_statistics->set_total_heap_size_executable(0);
+ heap_statistics->set_total_physical_size(0);
heap_statistics->set_used_heap_size(0);
heap_statistics->set_heap_size_limit(0);
return;
@@ -4404,6 +4517,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
heap->CommittedMemoryExecutable());
+ heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
heap_statistics->set_used_heap_size(heap->SizeOfObjects());
heap_statistics->set_heap_size_limit(heap->MaxReserved());
}
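
A hedged reading sketch, assuming the matching public accessor total_physical_size() on HeapStatistics and <cstdio>:

    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);
    printf("committed=%zu physical=%zu used=%zu\n",
           stats.total_heap_size(), stats.total_physical_size(),
           stats.used_heap_size());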
@@ -4416,6 +4530,30 @@ void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
}
+void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
+
+ i::AssertNoAllocation no_allocation;
+
+ class VisitorAdapter : public i::ObjectVisitor {
+ public:
+ explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(i::Object** start, i::Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
+ class_id);
+ }
+ private:
+ PersistentHandleVisitor* visitor_;
+ } visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
+}
+
+
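
The embedder side, as a hedged sketch: class ids are assigned with V8::SetWrapperClassId, and a PersistentHandleVisitor subclass then observes each such handle; MyVisitor and the id value are illustrative:

    class MyVisitor : public v8::PersistentHandleVisitor {
     public:
      virtual void VisitPersistentHandle(v8::Persistent<v8::Value> value,
                                         uint16_t class_id) {
        if (class_id == 1) {
          // Inspect the wrapper here; do not dispose it during the visit.
        }
      }
    };

    // MyVisitor visitor;
    // v8::V8::VisitHandlesWithClassIds(&visitor);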
bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -4506,7 +4644,7 @@ Persistent<Context> v8::Context::New(
// Create the environment.
env = isolate->bootstrapper()->CreateEnvironment(
isolate,
- Utils::OpenHandle(*global_object),
+ Utils::OpenHandle(*global_object, true),
proxy_template,
extensions);
@@ -4550,7 +4688,7 @@ void v8::Context::UseDefaultSecurityToken() {
}
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
- env->set_security_token(env->global());
+ env->set_security_token(env->global_object());
}
@@ -4595,7 +4733,7 @@ v8::Local<v8::Context> Context::GetCurrent() {
if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
return Local<Context>();
}
- i::Handle<i::Object> current = isolate->global_context();
+ i::Handle<i::Object> current = isolate->native_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
@@ -4608,7 +4746,7 @@ v8::Local<v8::Context> Context::GetCalling() {
return Local<Context>();
}
i::Handle<i::Object> calling =
- isolate->GetCallingGlobalContext();
+ isolate->GetCallingNativeContext();
if (calling.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
@@ -4625,8 +4763,8 @@ v8::Local<v8::Object> Context::GetCallingQmlGlobal() {
i::JavaScriptFrameIterator it;
if (it.done()) return Local<Object>();
context = i::Context::cast(it.frame()->context());
- if (!context->qml_global()->IsUndefined()) {
- i::Handle<i::Object> qmlglobal(context->qml_global());
+ if (!context->qml_global_object()->IsUndefined()) {
+ i::Handle<i::Object> qmlglobal(context->qml_global_object());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
} else {
return Local<Object>();
@@ -4676,9 +4814,9 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- isolate->bootstrapper()->ReattachGlobal(
- context,
- Utils::OpenHandle(*global_object));
+ i::Handle<i::JSGlobalProxy> global_proxy =
+ i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
+ isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
}
@@ -4710,11 +4848,32 @@ bool Context::IsCodeGenerationFromStringsAllowed() {
}
+void Context::SetErrorMessageForCodeGenerationFromStrings(
+ Handle<String> error) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate,
+ "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ i::Object** ctx = reinterpret_cast<i::Object**>(this);
+ i::Handle<i::Context> context =
+ i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Object> error_handle = Utils::OpenHandle(*error);
+ context->set_error_message_for_code_gen_from_strings(*error_handle);
+}
+
+
void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
}
+uint16_t V8::GetWrapperClassId(internal::Object** global_handle) {
+ return i::GlobalHandles::GetWrapperClassId(global_handle);
+}
+
+
Local<v8::Object> ObjectTemplate::NewInstance() {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
@@ -4932,6 +5091,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
+ CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -4952,6 +5112,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
+ CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -4966,6 +5127,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
+ CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@@ -4987,6 +5149,7 @@ bool v8::String::MakeExternal(
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
+ CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -5236,7 +5399,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastElements()) {
+ if (!self->HasFastObjectElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -5282,24 +5445,39 @@ Local<Number> v8::Number::New(double value) {
Local<Integer> v8::Integer::New(int32_t value) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
+ return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
+}
+
+
+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
+ return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
+}
+
+
+Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
if (i::Smi::IsValid(value)) {
return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
- isolate));
+ internal_isolate));
}
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
bool fits_into_int32_t = (value & (1 << 31)) == 0;
if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value));
+ return Integer::New(static_cast<int32_t>(value), isolate);
}
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
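
As a minimal usage sketch of the isolate-aware overloads above (the isolate
and scope setup here are illustrative assumptions, not part of the patch):

    // Sketch only: assumes an initialized, entered isolate on this V8 vintage.
    v8::HandleScope scope;
    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    // 42 is a valid Smi, so this takes the no-allocation fast path above.
    v8::Local<v8::Integer> small = v8::Integer::New(42, isolate);
    // The top bit is set, so this falls through to the factory's
    // NewNumber() path instead of Integer::New().
    v8::Local<v8::Integer> big =
        v8::Integer::NewFromUnsigned(0x80000000u, isolate);
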
@@ -5309,19 +5487,14 @@ void V8::IgnoreOutOfMemoryException() {
}
-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+bool V8::AddMessageListener(MessageCallback that) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
- obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ?
- isolate->heap()->undefined_value() :
- *Utils::OpenHandle(*data));
- listeners.add(obj.value());
+ listeners.add(isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
return true;
}
@@ -5336,8 +5509,7 @@ void V8::RemoveMessageListeners(MessageCallback that) {
for (int i = 0; i < listeners.length(); i++) {
if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+ i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listeners.get(i)));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners.set(i, isolate->heap()->undefined_value());
}
@@ -5366,6 +5538,8 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
+ isolate->InitializeLoggingAndCounters();
+ isolate->counters()->ResetHistograms();
}
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
@@ -5426,8 +5600,9 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() ||
+ IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
return 0;
}
return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
@@ -5779,7 +5954,8 @@ bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
foreign =
isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
}
- isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign,
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5794,7 +5970,8 @@ bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
if (that != NULL) {
foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
+ isolate->debugger()->SetEventListener(foreign,
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5805,7 +5982,7 @@ bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
- Utils::OpenHandle(*data));
+ Utils::OpenHandle(*data, true));
return true;
}
@@ -5944,7 +6121,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
v8::HandleScope scope;
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
- i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
+ i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
i::Handle<i::String> name =
isolate->factory()->LookupAsciiSymbol("MakeMirror");
i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
@@ -5976,6 +6153,7 @@ void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMessages(true);
}
+
Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
@@ -5983,6 +6161,20 @@ Local<Context> Debug::GetDebugContext() {
return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
}
+
+void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
+ // If no isolate is supplied, use the default isolate.
+ i::Debugger* debugger;
+ if (isolate != NULL) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ debugger = internal_isolate->debugger();
+ } else {
+ debugger = i::Isolate::GetDefaultIsolateDebugger();
+ }
+ debugger->set_live_edit_enabled(enable);
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
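
A hedged usage sketch of the new LiveEdit switch above, assuming the v8.h
declaration defaults the isolate argument to NULL as the comment implies:

    // Toggle LiveEdit on the default isolate, then off on a specific one.
    // 'some_isolate' is a hypothetical v8::Isolate* for illustration.
    v8::Debug::SetLiveEditEnabled(true);
    v8::Debug::SetLiveEditEnabled(false, some_isolate);
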
@@ -6252,13 +6444,6 @@ int HeapGraphNode::GetSelfSize() const {
}
-int HeapGraphNode::GetRetainedSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
- return ToInternal(this)->retained_size();
-}
-
-
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6274,28 +6459,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
}
-int HeapGraphNode::GetRetainersCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
- return ToInternal(this)->retainers().length();
-}
-
-
-const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->retainers()[index]);
-}
-
-
-const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
- return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
-}
-
-
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
@@ -6465,7 +6628,7 @@ void HeapProfiler::StopHeapObjectsTracking() {
}
-void HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
return i::HeapProfiler::PushHeapObjectsStats(stream);
@@ -6492,6 +6655,11 @@ int HeapProfiler::GetPersistentHandleCount() {
}
+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+ return i::HeapProfiler::GetMemorySizeUsedByProfiler();
+}
+
+
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -6553,6 +6721,7 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll() {
+ i::HandleScope scope;
internal::Deoptimizer::DeoptimizeAll();
}
@@ -6591,12 +6760,28 @@ char* HandleScopeImplementer::RestoreThread(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
+#ifdef DEBUG
+ bool found_block_before_deferred = false;
+#endif
// Iterate over all handles in the blocks except for the last.
for (int i = blocks()->length() - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
- v->VisitPointers(block, &block[kHandleBlockSize]);
+ if (last_handle_before_deferred_block_ != NULL &&
+ (last_handle_before_deferred_block_ < &block[kHandleBlockSize]) &&
+ (last_handle_before_deferred_block_ >= block)) {
+ v->VisitPointers(block, last_handle_before_deferred_block_);
+ ASSERT(!found_block_before_deferred);
+#ifdef DEBUG
+ found_block_before_deferred = true;
+#endif
+ } else {
+ v->VisitPointers(block, &block[kHandleBlockSize]);
+ }
}
+ ASSERT(last_handle_before_deferred_block_ == NULL ||
+ found_block_before_deferred);
+
// Iterate over live handles in the last block (if any).
if (!blocks()->is_empty()) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
@@ -6624,4 +6809,66 @@ char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
return storage + ArchiveSpacePerThread();
}
+
+DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
+ DeferredHandles* deferred =
+ new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
+
+ while (!blocks_.is_empty()) {
+ Object** block_start = blocks_.last();
+ Object** block_limit = &block_start[kHandleBlockSize];
+ // We should not need to check for NoHandleAllocation here. Assert
+ // this.
+ ASSERT(prev_limit == block_limit ||
+ !(block_start <= prev_limit && prev_limit <= block_limit));
+ if (prev_limit == block_limit) break;
+ deferred->blocks_.Add(blocks_.last());
+ blocks_.RemoveLast();
+ }
+
+ // deferred->blocks_ now contains the blocks installed on the
+ // HandleScope stack since BeginDeferredScope was called, but in
+ // reverse order.
+
+ ASSERT(prev_limit == NULL || !blocks_.is_empty());
+
+ ASSERT(!blocks_.is_empty() && prev_limit != NULL);
+ ASSERT(last_handle_before_deferred_block_ != NULL);
+ last_handle_before_deferred_block_ = NULL;
+ return deferred;
+}
+
+
+void HandleScopeImplementer::BeginDeferredScope() {
+ ASSERT(last_handle_before_deferred_block_ == NULL);
+ last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
+}
+
+
+DeferredHandles::~DeferredHandles() {
+ isolate_->UnlinkDeferredHandles(this);
+
+ for (int i = 0; i < blocks_.length(); i++) {
+#ifdef DEBUG
+ HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
+#endif
+ isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
+ }
+}
+
+
+void DeferredHandles::Iterate(ObjectVisitor* v) {
+ ASSERT(!blocks_.is_empty());
+
+ ASSERT((first_block_limit_ >= blocks_.first()) &&
+ (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
+
+ v->VisitPointers(blocks_.first(), first_block_limit_);
+
+ for (int i = 1; i < blocks_.length(); i++) {
+ v->VisitPointers(blocks_[i], &blocks_[i][kHandleBlockSize]);
+ }
+}
+
+
} } // namespace v8::internal
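
A rough sketch of the call pattern behind BeginDeferredScope()/Detach()
above. Everything here is internal machinery reachable only through the
friend classes declared in api.h below; 'prev_limit' stands in for the
handle-scope limit captured by the (not shown) public wrapper:

    // Sketch only: bracket handle allocation so the blocks created in
    // between outlive the enclosing scope.
    HandleScopeImplementer* impl = isolate->handle_scope_implementer();
    impl->BeginDeferredScope();      // records the current 'next' pointer
    // ... allocate handles; full blocks are pushed onto impl->blocks_ ...
    DeferredHandles* deferred = impl->Detach(prev_limit);
    // 'deferred' now owns those blocks; ~DeferredHandles() hands them back
    // via ReturnBlock() and unlinks itself from the isolate's list.
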
diff --git a/src/3rdparty/v8/src/api.h b/src/3rdparty/v8/src/api.h
index 3ad57f4..7197b6c 100644
--- a/src/3rdparty/v8/src/api.h
+++ b/src/3rdparty/v8/src/api.h
@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
- ASSERT(value()->HasFastElements());
+ ASSERT(value()->HasFastObjectElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
- ASSERT(value_->HasFastElements());
+ ASSERT(value_->HasFastObjectElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
@@ -159,6 +159,27 @@ class RegisteredExtension {
};
+#define OPEN_HANDLE_LIST(V) \
+ V(Template, TemplateInfo) \
+ V(FunctionTemplate, FunctionTemplateInfo) \
+ V(ObjectTemplate, ObjectTemplateInfo) \
+ V(Signature, SignatureInfo) \
+ V(AccessorSignature, FunctionTemplateInfo) \
+ V(TypeSwitch, TypeSwitchInfo) \
+ V(Data, Object) \
+ V(RegExp, JSRegExp) \
+ V(Object, JSObject) \
+ V(Array, JSArray) \
+ V(String, String) \
+ V(Script, Object) \
+ V(Function, JSFunction) \
+ V(Message, JSObject) \
+ V(Context, Context) \
+ V(External, Foreign) \
+ V(StackTrace, JSArray) \
+ V(StackFrame, JSObject)
+
+
class Utils {
public:
static bool ReportApiFailure(const char* location, const char* message);
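
The OPEN_HANDLE_LIST/DECLARE_OPEN_HANDLE pair above is a textbook X-macro:
one list macro is expanded against different per-entry macros to generate
the declarations here and the definitions further down. A generic,
self-contained illustration of the technique (names invented for the
example, not part of V8):

    #define COLOR_LIST(V) \
      V(Red, 0xFF0000)    \
      V(Green, 0x00FF00)
    #define DECLARE_COLOR(name, value) const int k##name = value;
    COLOR_LIST(DECLARE_COLOR)  // expands to kRed and kGreen definitions
    #undef DECLARE_COLOR
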
@@ -200,43 +221,18 @@ class Utils {
v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
static inline Local<Signature> ToLocal(
v8::internal::Handle<v8::internal::SignatureInfo> obj);
+ static inline Local<AccessorSignature> AccessorSignatureToLocal(
+ v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
- static inline v8::internal::Handle<v8::internal::TemplateInfo>
- OpenHandle(const Template* that);
- static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
- OpenHandle(const FunctionTemplate* that);
- static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
- OpenHandle(const ObjectTemplate* that);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Data* data);
- static inline v8::internal::Handle<v8::internal::JSRegExp>
- OpenHandle(const RegExp* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const v8::Object* data);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const v8::Array* data);
- static inline v8::internal::Handle<v8::internal::String>
- OpenHandle(const String* data);
- static inline v8::internal::Handle<v8::internal::Object>
- OpenHandle(const Script* data);
- static inline v8::internal::Handle<v8::internal::JSFunction>
- OpenHandle(const Function* data);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const Message* message);
- static inline v8::internal::Handle<v8::internal::JSArray>
- OpenHandle(const StackTrace* stack_trace);
- static inline v8::internal::Handle<v8::internal::JSObject>
- OpenHandle(const StackFrame* stack_frame);
- static inline v8::internal::Handle<v8::internal::Context>
- OpenHandle(const v8::Context* context);
- static inline v8::internal::Handle<v8::internal::SignatureInfo>
- OpenHandle(const v8::Signature* sig);
- static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
- OpenHandle(const v8::TypeSwitch* that);
- static inline v8::internal::Handle<v8::internal::Foreign>
- OpenHandle(const v8::External* that);
+#define DECLARE_OPEN_HANDLE(From, To) \
+ static inline v8::internal::Handle<v8::internal::To> \
+ OpenHandle(const From* that, bool allow_empty_handle = false);
+
+OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
+
+#undef DECLARE_OPEN_HANDLE
};
@@ -253,7 +249,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
if (!is_null()) {
handle = *this;
}
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
+ return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
}
@@ -276,6 +272,7 @@ MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
+MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
@@ -289,32 +286,18 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
// Implementations of OpenHandle
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle(\
- const v8::From* that) { \
- return v8::internal::Handle<v8::internal::To>( \
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ EXTRA_CHECK(allow_empty_handle || that != NULL); \
+ return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
-MAKE_OPEN_HANDLE(Template, TemplateInfo)
-MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
-MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
-MAKE_OPEN_HANDLE(Signature, SignatureInfo)
-MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
-MAKE_OPEN_HANDLE(Data, Object)
-MAKE_OPEN_HANDLE(RegExp, JSRegExp)
-MAKE_OPEN_HANDLE(Object, JSObject)
-MAKE_OPEN_HANDLE(Array, JSArray)
-MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, Object)
-MAKE_OPEN_HANDLE(Function, JSFunction)
-MAKE_OPEN_HANDLE(Message, JSObject)
-MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Foreign)
-MAKE_OPEN_HANDLE(StackTrace, JSArray)
-MAKE_OPEN_HANDLE(StackFrame, JSObject)
+OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
+#undef OPEN_HANDLE_LIST
namespace internal {
@@ -386,6 +369,32 @@ class StringTracker {
};
+class DeferredHandles {
+ public:
+ ~DeferredHandles();
+
+ private:
+ DeferredHandles(Object** first_block_limit, Isolate* isolate)
+ : next_(NULL),
+ previous_(NULL),
+ first_block_limit_(first_block_limit),
+ isolate_(isolate) {
+ isolate->LinkDeferredHandles(this);
+ }
+
+ void Iterate(ObjectVisitor* v);
+
+ List<Object**> blocks_;
+ DeferredHandles* next_;
+ DeferredHandles* previous_;
+ Object** first_block_limit_;
+ Isolate* isolate_;
+
+ friend class HandleScopeImplementer;
+ friend class Isolate;
+};
+
+
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
// neat in some ways, but it would expose internal implementation details in
@@ -403,7 +412,8 @@ class HandleScopeImplementer {
entered_contexts_(0),
saved_contexts_(0),
spare_(NULL),
- call_depth_(0) { }
+ call_depth_(0),
+ last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
DeleteArray(spare_);
@@ -439,6 +449,13 @@ class HandleScopeImplementer {
inline bool HasSavedContexts();
inline List<internal::Object**>* blocks() { return &blocks_; }
+ Isolate* isolate() const { return isolate_; }
+
+ void ReturnBlock(Object** block) {
+ ASSERT(block != NULL);
+ if (spare_ != NULL) DeleteArray(spare_);
+ spare_ = block;
+ }
private:
void ResetAfterArchive() {
@@ -446,6 +463,7 @@ class HandleScopeImplementer {
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
spare_ = NULL;
+ last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
}
@@ -463,6 +481,9 @@ class HandleScopeImplementer {
ASSERT(call_depth_ == 0);
}
+ void BeginDeferredScope();
+ DeferredHandles* Detach(Object** prev_limit);
+
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
@@ -471,6 +492,7 @@ class HandleScopeImplementer {
List<Context*> saved_contexts_;
Object** spare_;
int call_depth_;
+ Object** last_handle_before_deferred_block_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@@ -478,6 +500,9 @@ class HandleScopeImplementer {
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
+ friend class DeferredHandles;
+ friend class DeferredHandleScope;
+
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};
diff --git a/src/3rdparty/v8/src/arm/assembler-arm-inl.h b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
index d5db686..acd61fe 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm-inl.h
+++ b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
@@ -75,7 +75,7 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+ return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
}
@@ -97,25 +97,30 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_pointer_at(pc_)));
}
Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+ return &reconstructed_obj_ptr_;
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -127,7 +132,8 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ return &reconstructed_adr_ptr_;
}
@@ -141,10 +147,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
@@ -162,6 +165,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+static const int kNoCodeAgeSequenceLength = 3;
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
+
+
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
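
For clarity, the offset arithmetic used by code_age_stub() and
set_code_age_stub() above, given ARM's four-byte instructions:

    // The stub address is stored in the word after the first two
    // instructions of the code-age sequence:
    //   kInstrSize * (kNoCodeAgeSequenceLength - 1) == 4 * (3 - 1) == 8
    // bytes beyond pc_.
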
@@ -235,6 +256,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -261,6 +284,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -329,7 +354,7 @@ void Assembler::emit(Instr x) {
}
-Address Assembler::target_address_address_at(Address pc) {
+Address Assembler::target_pointer_address_at(Address pc) {
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
@@ -359,8 +384,63 @@ Address Assembler::target_address_address_at(Address pc) {
}
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
+Address Assembler::target_pointer_at(Address pc) {
+ if (IsMovW(Memory::int32_at(pc))) {
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ Instruction* instr = Instruction::At(pc);
+ Instruction* next_instr = Instruction::At(pc + kInstrSize);
+ return reinterpret_cast<Address>(
+ (next_instr->ImmedMovwMovtValue() << 16) |
+ instr->ImmedMovwMovtValue());
+ }
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+#ifdef USE_BLX
+ // Call sequence on V7 or later is :
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or pre-V7 or cases that need frequent patching:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
+ Address candidate = pc - 2 * Assembler::kInstrSize;
+ Instr candidate_instr(Memory::int32_at(candidate));
+ if (IsLdrPcImmediateOffset(candidate_instr)) {
+ return candidate;
+ }
+ candidate = pc - 3 * Assembler::kInstrSize;
+ ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + kInstrSize)));
+ return candidate;
+#else
+ // Call sequence is:
+ // mov lr, pc
+ // ldr pc, [pc, #...] @ call address
+ // @ return address
+ return pc - kInstrSize;
+#endif
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+#ifdef USE_BLX
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ return pc + kInstrSize * 2;
+ } else {
+ ASSERT(IsMovW(Memory::int32_at(pc)));
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ return pc + kInstrSize * 3;
+ }
+#else
+ return pc + kInstrSize;
+#endif
}
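
The two helpers above are intended as inverses over the call sequences they
document. A worked example of the invariant on the movw/movt path, with
offsets following from the comments above and kInstrSize == 4:

    //   movw ip, #lo16   @ call_start
    //   movt ip, #hi16   @ call_start + 4
    //   blx  ip          @ call_start + 8
    //   <return here>    @ call_start + 12
    // return_address_from_call_start(call_start) == call_start + 3*kInstrSize
    // target_address_from_return_address(ret)    == ret - 3*kInstrSize
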
@@ -376,17 +456,53 @@ void Assembler::set_external_target_at(Address constant_pool_entry,
}
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+void Assembler::set_target_pointer_at(Address pc, Address target) {
+ if (IsMovW(Memory::int32_at(pc))) {
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ uint32_t intermediate = instr_ptr[0];
+ intermediate &= ~EncodeMovwImmediate(0xFFFF);
+ intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
+ instr_ptr[0] = intermediate;
+ intermediate = instr_ptr[1];
+ intermediate &= ~EncodeMovwImmediate(0xFFFF);
+ intermediate |= EncodeMovwImmediate(immediate >> 16);
+ instr_ptr[1] = intermediate;
+ ASSERT(IsMovW(Memory::int32_at(pc)));
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ CPU::FlushICache(pc, 2 * kInstrSize);
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction is actually patched in the case
+ // of embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged.
+ }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return target_pointer_at(pc);
+}
+
+
void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
+ set_target_pointer_at(pc, target);
}
+
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
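
A self-contained sketch of the immediate splitting performed by
set_target_pointer_at() above; EncodeMovwImmediate packs a 16-bit value into
the ARM imm4:imm12 fields:

    #include <stdint.h>
    // Mirrors the helper in the patch: imm4 -> bits 19:16, imm12 -> bits 11:0.
    static uint32_t EncodeMovwImmediate(uint32_t imm) {
      return ((imm & 0xf000) << 4) | (imm & 0xfff);
    }
    // For target 0x12345678 the movw gets 0x5678 and the movt gets 0x1234;
    // EncodeMovwImmediate(0x5678) == 0x50678.
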
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.cc b/src/3rdparty/v8/src/arm/assembler-arm.cc
index ec28da4..b679efa 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/assembler-arm.cc
@@ -32,7 +32,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -52,17 +52,20 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
-#endif // def CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // def CAN_USE_VFP_INSTRUCTIONS
+#endif // CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP3_INSTRUCTIONS
+ answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
+#endif // CAN_USE_VFP3_INSTRUCTIONS
+#ifdef CAN_USE_VFP2_INSTRUCTIONS
+ answer |= 1u << VFP2;
+#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
@@ -70,18 +73,21 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7;
+ answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
-#endif // def __arm__
+#endif  // __arm__
+ if (answer & (1u << ARMv7)) {
+ answer |= 1u << UNALIGNED_ACCESSES;
+ }
return answer;
}
void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
+ unsigned standard_features = static_cast<unsigned>(
+ OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
@@ -101,27 +107,53 @@ void CpuFeatures::Probe() {
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
// enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7;
+ supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
-#else // def __arm__
+
+ if (FLAG_enable_sudiv) {
+ supported_ |= 1u << SUDIV;
+ }
+
+ if (FLAG_enable_movw_movt) {
+ supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
+#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
+ // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
// 0406B, page A1-6.
- supported_ |= 1u << VFP3 | 1u << ARMv7;
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
+ found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
+ } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
+ found_by_runtime_probing_ |= 1u << VFP2;
}
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
+
+ if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
+ found_by_runtime_probing_ |= 1u << SUDIV;
+ }
+
+ if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
+ found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
+ }
+
+ if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
+ OS::ArmCpuHasFeature(ARMv7)) {
+ found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
+
+ supported_ |= found_by_runtime_probing_;
#endif
+
+ // Assert that VFP3 implies VFP2 and ARMv7.
+ ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
}
@@ -292,8 +324,8 @@ static const int kMinimalBufferSize = 4*KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ recorded_ast_id_(TypeFeedbackId::None()),
+ positions_recorder_(this) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -705,12 +737,6 @@ void Assembler::next(Label* L) {
}
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
@@ -746,7 +772,7 @@ static bool fits_shifter(uint32_t imm32,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -754,7 +780,7 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
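
fits_shifter() in the hunks above (and the negated retries here) revolve
around ARM's shifter operand: an 8-bit constant rotated right by an even
amount. A minimal, standalone encodability check under that definition --
a sketch, not the V8 helper itself:

    #include <stdint.h>
    // True if imm32 equals some 8-bit value rotated right by 2*n, n in [0,15].
    static bool EncodableAsArmImmediate(uint32_t imm32) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotating left by 'rot' undoes a right-rotation by 'rot'.
        uint32_t rotl = rot == 0 ? imm32
                                 : (imm32 << rot) | (imm32 >> (32 - rot));
        if (rotl <= 0xff) return true;
      }
      return false;
    }
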
@@ -775,13 +801,14 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_use_constant_pool() const {
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif // def DEBUG
+ if (assembler != NULL && assembler->predictable_code_size()) return true;
return Serializer::enabled();
} else if (rmode_ == RelocInfo::NONE) {
return false;
@@ -790,24 +817,28 @@ bool Operand::must_use_constant_pool() const {
}
-bool Operand::is_single_instruction(Instr instr) const {
+static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
+ if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+ return true;
+ }
+ if (x.must_output_reloc_info(assembler)) {
+ return false;
+ }
+ return CpuFeatures::IsSupported(ARMv7);
+}
+
+
+bool Operand::is_single_instruction(const Assembler* assembler,
+ Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
- if (must_use_constant_pool() ||
+ if (must_output_reloc_info(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- // mov instruction will be an ldr from constant pool (one instruction).
- return true;
- } else {
- // mov instruction will be a mov or movw followed by movt (two
- // instructions).
- return false;
- }
+ return !use_movw_movt(*this, assembler);
} else {
       // If this is not a mov or mvn instruction there will always be an
       // additional instruction - either mov or ldr. The mov might actually be two
@@ -823,6 +854,29 @@ bool Operand::is_single_instruction(Instr instr) const {
}
+void Assembler::move_32_bit_immediate(Condition cond,
+ Register rd,
+ SBit s,
+ const Operand& x) {
+ if (rd.code() != pc.code() && s == LeaveCC) {
+ if (use_movw_movt(x, this)) {
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | rd.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ return;
+ }
+ }
+
+ RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
+ ldr(rd, MemOperand(pc, 0), cond);
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -833,7 +887,7 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_use_constant_pool() ||
+ if (x.must_output_reloc_info(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -842,24 +896,19 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() ||
- !CpuFeatures::IsSupported(ARMv7)) {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- // Will probably use movw, will certainly not use constant pool.
- mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- }
+ move_32_bit_immediate(cond, rd, LeaveCC, x);
} else {
- // If this is not a mov or mvn instruction we may still be able to avoid
- // a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool() &&
- (instr & kMovMvnMask) != kMovMvnPattern) {
- mov(ip, x, LeaveCC, cond);
- } else {
- RecordRelocInfo(x.rmode_, x.imm32_);
+ if ((instr & kMovMvnMask) == kMovMvnPattern) {
+ // Moves need to use a constant pool entry.
+ RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
ldr(ip, MemOperand(pc, 0), cond);
+ } else if (x.must_output_reloc_info(this)) {
+      // Otherwise, use the most efficient way to fetch from the constant pool.
+ move_32_bit_immediate(cond, ip, LeaveCC, x);
+ } else {
+ // If this is not a mov or mvn instruction we may still be able to
+ // avoid a constant pool entry by using mvn or movw.
+ mov(ip, x, LeaveCC, cond);
}
addrmod1(instr, rn, rd, Operand(ip));
}
@@ -1166,6 +1215,9 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
+  // May use movw if supported, but on unsupported platforms will try an
+  // equivalent rotated immed_8 value and other tricks before falling back to
+  // a constant pool load.
mov(reg, Operand(immediate), LeaveCC, cond);
}
@@ -1195,6 +1247,22 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
}
+void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::sdiv(Register dst, Register src1, Register src2,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf*B12 |
+ src2.code()*B8 | B4 | src1.code());
+}
+
+
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
@@ -1379,7 +1447,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_use_constant_pool() ||
+ if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
@@ -1656,7 +1724,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1698,7 +1766,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1742,7 +1810,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1783,7 +1851,7 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1814,7 +1882,7 @@ void Assembler::vstr(const SwVfpRegister src,
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
- vldr(src, operand.rn(), operand.offset(), cond);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -1826,7 +1894,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1834,6 +1902,7 @@ void Assembler::vldm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1847,7 +1916,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1855,6 +1924,7 @@ void Assembler::vstm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
+ ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@@ -1867,7 +1937,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1888,7 +1958,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1911,7 +1981,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
//
@@ -1961,13 +2031,14 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
+ const Register scratch,
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
- if (FitsVMOVDoubleImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
} else {
@@ -1975,22 +2046,22 @@ void Assembler::vmov(const DwVfpRegister dst,
// using vldr from a constant pool.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
+ mov(ip, Operand(lo));
- if (lo == hi) {
- // If the lo and hi parts of the double are equal, the literal is easier
- // to create. This is the case with 0.0.
- mov(ip, Operand(lo));
- vmov(dst, ip, ip);
- } else {
+ if (scratch.is(no_reg)) {
       // Move the low part of the double into the lower of the corresponding S
// registers of D register dst.
- mov(ip, Operand(lo));
vmov(dst.low(), ip, cond);
       // Move the high part of the double into the higher of the corresponding S
// registers of D register dst.
mov(ip, Operand(hi));
vmov(dst.high(), ip, cond);
+ } else {
+ // Move the low and high parts of the double to a D register in one
+ // instruction.
+ mov(scratch, Operand(hi));
+ vmov(dst, ip, scratch, cond);
}
}
}
@@ -2001,7 +2072,7 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2014,7 +2085,7 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
@@ -2028,7 +2099,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2043,7 +2114,7 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2057,7 +2128,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@@ -2072,7 +2143,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@@ -2197,7 +2268,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2206,7 +2277,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2215,7 +2286,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2224,7 +2295,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2233,7 +2304,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2242,7 +2313,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2251,7 +2322,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2259,6 +2330,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
0x5*B9 | B8 | B6 | src.code());
}
@@ -2267,6 +2339,7 @@ void Assembler::vneg(const DwVfpRegister dst,
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
0x5*B9 | B8 | 0x3*B6 | src.code());
}
@@ -2281,7 +2354,7 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2296,7 +2369,7 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2311,7 +2384,7 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2326,7 +2399,7 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@@ -2339,7 +2412,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@@ -2352,7 +2425,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2363,7 +2436,7 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2373,7 +2446,7 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2384,7 +2457,7 @@ void Assembler::vsqrt(const DwVfpRegister dst,
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
@@ -2392,15 +2465,35 @@ void Assembler::vsqrt(const DwVfpRegister dst,
// Pseudo instructions.
void Assembler::nop(int type) {
- // This is mov rx, rx.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
+  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
+  // part of the CPU's pipeline and must occupy an issue slot. Older ARM
+  // chips simply used MOV Rx, Rx as a NOP, and that form performs at least
+  // as well even on newer CPUs. We therefore use MOV Rx, Rx on all CPUs,
+  // with Rx encoding a nop type.
+ ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
emit(al | 13*B21 | type*B12 | type);
}
+bool Assembler::IsMovT(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // mask off condition
+             ((kNumRegisters-1)*B12) |            // mask off destination register
+             EncodeMovwImmediate(0xFFFF));        // mask off immediate value
+ return instr == 0x34*B20;
+}
+
+
+bool Assembler::IsMovW(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // mask off condition
+             ((kNumRegisters-1)*B12) |            // mask off destination register
+             EncodeMovwImmediate(0xFFFF));        // mask off immediate value
+ return instr == 0x30*B20;
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
+ ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
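
A minimal standalone sketch (not V8 code; field layout per ARM DDI 0406,
helper names invented here) of the encodings the helpers above emit and match:

    #include <cstdint>

    // nop(type) emits 'mov rT, rT': cond=al (0xE) in bits 31-28, the MOV
    // data-processing opcode (0b1101) in bits 24-21, destination register
    // in bits 15-12 and source register in bits 3-0.
    constexpr uint32_t NopFor(int type) {
      return (0xEu << 28) | (13u << 21) |
             (static_cast<uint32_t>(type) << 12) | static_cast<uint32_t>(type);
    }

    // With the condition, destination and split 16-bit immediate masked
    // off, movw leaves 0x30 in bits 27-20 and movt leaves 0x34.
    inline bool LooksLikeMovW(uint32_t instr) {
      return ((instr >> 20) & 0xFF) == 0x30;
    }
    inline bool LooksLikeMovT(uint32_t instr) {
      return ((instr >> 20) & 0xFF) == 0x34;
    }
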
@@ -2435,6 +2528,14 @@ void Assembler::RecordComment(const char* msg) {
}
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -2508,15 +2609,21 @@ void Assembler::dd(uint32_t data) {
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
+ UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL) ||
+ mode == DONT_USE_CONSTANT_POOL) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode)
+ || mode == DONT_USE_CONSTANT_POOL);
// These modes do not need an entry in the constant pool.
} else {
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@@ -2542,7 +2649,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_,
+ rmode,
+ RecordedAstId().ToInt(),
+ NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2602,13 +2712,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
 // pool (including the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
- int needed_space = jump_instr + kInstrSize +
- num_pending_reloc_info_ * kInstrSize + kGap;
+ int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+ int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(size);
// Emit jump over constant pool if necessary.
Label after_pool;
@@ -2616,32 +2728,33 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_pending_reloc_info_);
+ // Put down constant pool marker "Undefined instruction".
+ emit(kConstantPoolMarker |
+ EncodeConstantPoolLength(num_pending_reloc_info_));
// Emit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL);
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0);
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
- ASSERT(is_uint12(delta));
-
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ if (IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0) {
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+ ASSERT(is_uint12(delta));
+
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ } else {
+ ASSERT(IsMovW(instr));
+ }
emit(rinfo.data());
}
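
For orientation, the pool emitted by this loop has the following shape in the
instruction stream (sketch; N = num_pending_reloc_info_):

    b     after_pool                                      ; only if require_jump
    <kConstantPoolMarker | EncodeConstantPoolLength(N)>   ; marker word
    <data word for pending entry 0>
    ...
    <data word for pending entry N-1>
  after_pool:

Each pending entry's data word is emitted either way; only pc-relative
'ldr rd, [pc, #0]' loads are back-patched with the real offset, while a load
already emitted as a movw/movt pair (the new else branch) is merely asserted
and left unpatched.
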
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.h b/src/3rdparty/v8/src/arm/assembler-arm.h
index e2d5f59..8418aee 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/assembler-arm.h
@@ -424,8 +424,8 @@ class Operand BASE_EMBEDDED {
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(Instr instr = 0) const;
- bool must_use_constant_pool() const;
+ bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
+ bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
@@ -510,6 +510,11 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ if (f == VFP2 && !FLAG_enable_vfp2) return false;
+ if (f == SUDIV && !FLAG_enable_sudiv) return false;
+ if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
+ return false;
+ }
return (supported_ & (1u << f)) != 0;
}
@@ -535,6 +540,8 @@ class CpuFeatures : public AllStatic {
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
+ // VFP2 and ARMv7 are implied by VFP3.
+ if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
@@ -642,9 +649,6 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -677,13 +681,25 @@ class Assembler : public AssemblerBase {
void label_at_put(Label* L, int at_offset);
// Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_pointer_address_at(Address pc));
+
+ // Read/Modify the pointer in the branch/call/move instruction at pc.
+ INLINE(static Address target_pointer_at(Address pc));
+ INLINE(static void set_target_pointer_at(Address pc, Address target));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ INLINE(static Address target_address_from_return_address(Address pc));
+
+ // Given the address of the beginning of a call, return the address
+ // in the instruction stream that the call will return from.
+ INLINE(static Address return_address_from_call_start(Address pc));
+
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -702,22 +718,6 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef USE_BLX
- // Call sequence is:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- static const int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- static const int kCallTargetAddressOffset = kInstrSize;
-#endif
-
// Distance between start of patched return sequence and the emitted address
// to jump to.
#ifdef USE_BLX
@@ -746,6 +746,12 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
#endif
+#ifdef USE_BLX
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
+#else
+ static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
+#endif
+
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
@@ -861,6 +867,12 @@ class Assembler : public AssemblerBase {
void mla(Register dst, Register src1, Register src2, Register srcA,
SBit s = LeaveCC, Condition cond = al);
+ void mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond = al);
+
+ void sdiv(Register dst, Register src1, Register src2,
+ Condition cond = al);
+
void mul(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
@@ -1045,6 +1057,7 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
double imm,
+ const Register scratch = no_reg,
const Condition cond = al);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
@@ -1164,6 +1177,20 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
+ static bool use_immediate_embedded_pointer_loads(
+ const Assembler* assembler) {
+#ifdef USE_BLX
+ return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size());
+#else
+    // If not using BLX, no load from the constant pool can become an
+    // immediate load, because the ldr pc, [pc + #xxxx] used for calls must
+    // be a single instruction and cannot easily be distinguished out of
+    // context from other loads that could use movw/movt.
+ return false;
+#endif
+ }
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
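
For reference, the two embedded-pointer load shapes this predicate chooses
between, sketched in assembly:

    ; Via the constant pool (a single instruction, and the only form usable
    ; for the 'ldr pc, [pc, #...]' call sequence when BLX is unavailable):
    ldr   rd, [pc, #offset]          ; offset back-patched at pool emission

    ; Immediate load (two instructions, no pool entry, patchable in place;
    ; hence the MOVW_MOVT_IMMEDIATE_LOADS gate and the
    ; predictable_code_size() opt-out):
    movw  rd, #:lower16:pointer
    movt  rd, #:upper16:pointer
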
@@ -1203,22 +1230,41 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void SetRecordedAstId(unsigned ast_id) {
- ASSERT(recorded_ast_id_ == kNoASTId);
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
- unsigned RecordedAstId() {
- ASSERT(recorded_ast_id_ != kNoASTId);
+ TypeFeedbackId RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
- void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ // Record the emission of a constant pool.
+ //
+  // The emission of the constant pool depends on the size of the generated
+  // code and on the number of RelocInfo entries recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the constant
+ // pools and cause the version of the code with debugger support to have
+ // constant pools generated in different places.
+  // Recording the position and size of emitted constant pools makes it
+  // possible to correctly compute the offset mappings between the different
+  // versions of a function in all situations.
+ //
+ // The parameter indicates the size of the constant pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
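
Tying RecordConstPool back to the CheckConstPool change earlier in this patch,
the size it records works out to:

    size = (require_jump ? kInstrSize : 0)          // branch over the pool
         + kInstrSize                               // the marker word
         + num_pending_reloc_info_ * kPointerSize;  // one data word per entry
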
@@ -1265,12 +1311,16 @@ class Assembler : public AssemblerBase {
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+ static bool IsMovT(Instr instr);
+ static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant.
static const int kMaxDistToPool = 4*KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+ STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
+ kMaxNumPendingRelocInfo);
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1283,9 +1333,7 @@ class Assembler : public AssemblerBase {
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned recorded_ast_id_;
-
- bool emit_debug_code() const { return emit_debug_code_; }
+ TypeFeedbackId recorded_ast_id_;
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1403,6 +1451,12 @@ class Assembler : public AssemblerBase {
void GrowBuffer();
inline void emit(Instr x);
+ // 32-bit immediate values
+ void move_32_bit_immediate(Condition cond,
+ Register rd,
+ SBit s,
+ const Operand& x);
+
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x);
@@ -1416,8 +1470,14 @@ class Assembler : public AssemblerBase {
void link_to(Label* L, Label* appendix);
void next(Label* L);
+ enum UseConstantPoolMode {
+ USE_CONSTANT_POOL,
+ DONT_USE_CONSTANT_POOL
+ };
+
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
+ UseConstantPoolMode mode = USE_CONSTANT_POOL);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
@@ -1425,7 +1485,6 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
- bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/src/3rdparty/v8/src/arm/builtins-arm.cc b/src/3rdparty/v8/src/arm/builtins-arm.cc
index c99e778..24d14e8 100644
--- a/src/3rdparty/v8/src/arm/builtins-arm.cc
+++ b/src/3rdparty/v8/src/arm/builtins-arm.cc
@@ -75,12 +75,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the InternalArray function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(
@@ -90,12 +91,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
+ // Load the native context.
- __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -114,7 +116,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -208,7 +210,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+ __ LoadInitialArrayMap(array_function, scratch2,
+ elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -440,10 +443,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,
@@ -696,6 +699,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(pc, r2);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+
+ __ push(r1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -1186,6 +1226,39 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we rely on the fact that make_code_young doesn't do any
+  // garbage collection, which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. To keep the code fast we
+  // also don't build an internal frame, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - function object
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(1, 0, r1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ mov(pc, r0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
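
For one age C in CODE_AGE_LIST the macro above expands to a pair of trivial
builtins; with a hypothetical member Sexagenarian (the real list lives
elsewhere in V8) that is:

    void Builtins::Generate_MakeSexagenarianCodeYoungAgainEvenMarking(
        MacroAssembler* masm) {
      GenerateMakeCodeYoungAgainCommon(masm);
    }
    void Builtins::Generate_MakeSexagenarianCodeYoungAgainOddMarking(
        MacroAssembler* masm) {
      GenerateMakeCodeYoungAgainCommon(masm);
    }
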
@@ -1245,7 +1318,7 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!CPU::SupportsCrankshaft()) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
@@ -1365,9 +1438,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
@@ -1560,9 +1633,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
index bea3169..a769f54 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
@@ -85,6 +85,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
// Pop the function info from the stack.
@@ -98,32 +100,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+ __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+ __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ ldr(r1,
+ FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ tst(r1, r1);
+ __ b(ne, &check_optimized);
+ }
+ __ bind(&install_unoptimized);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@@ -131,6 +145,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Return result. The argument function info has been popped already.
__ Ret();
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+  // r2 holds the native context, r1 points to a fixed array of 3-element
+  // entries (native context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into r4.
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+ __ cmp(r2, r5);
+ __ b(eq, &install_optimized);
+
+  // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
+ Label loop;
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+ __ bind(&loop);
+  // Do not double-check the first entry.
+
+ __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ b(eq, &install_unoptimized);
+ __ sub(r4, r4, Operand(
+ Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r5, MemOperand(r5));
+ __ cmp(r2, r5);
+ __ b(ne, &loop);
+ // Hit: fetch the optimized code.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r5, r5, Operand(kPointerSize));
+ __ ldr(r4, MemOperand(r5));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+ 1, r6, r7);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+ // Now link a function into a list of optimized functions.
+ __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+  // No need for a write barrier, as the JSFunction (r0) is in new space.
+
+ __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Copy the JSFunction (r0) into r4 before issuing the write barrier, as
+  // the barrier clobbers all the registers passed to it.
+ __ mov(r4, r0);
+ __ RecordWriteContextSlot(
+ r2,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ r4,
+ r1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
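
The optimized code map walked above has this layout (sketch; the three-slot
stride follows from the SharedFunctionInfo::kEntryLength steps in the loop):

    optimized_code_map (FixedArray):
      [0] native context    \
      [1] optimized code     > entry 0
      [2] literals          /
      [3] native context   ... entry 1, and so on

The loop loads the array length, scans backwards one entry at a time
comparing each context slot against r2, and on a hit reads the code object
from the slot one pointer further on.
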
@@ -162,16 +242,16 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Copy the qml global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -214,9 +294,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
@@ -226,20 +306,20 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
+ __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
// Copy the qml global object from the surrounding context.
- __ ldr(r1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
- __ str(r1, ContextOperand(r0, Context::QML_GLOBAL_INDEX));
+ __ ldr(r1, ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX));
+ __ str(r1, ContextOperand(r0, Context::QML_GLOBAL_OBJECT_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -527,8 +607,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
__ vcvt_f64_s32(d7, d7.high());
@@ -583,11 +663,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -597,9 +675,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) &&
+ if (CpuFeatures::IsSupported(VFP2) &&
destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -612,8 +690,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Convert smi to double using VFP instructions.
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
@@ -644,11 +722,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Register scratch3,
DwVfpRegister double_scratch,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -690,8 +766,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Label done;
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
@@ -754,6 +830,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DwVfpRegister double_dst,
+ DwVfpRegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -776,25 +853,23 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ b(&done);
__ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_dst,
scratch1,
+ double_dst,
scratch2,
+ double_scratch,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
@@ -834,7 +909,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DwVfpRegister double_scratch,
+ DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -846,34 +922,29 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ UntagAndJumpIfSmi(dst, object, &done);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- SwVfpRegister single_scratch = double_scratch.low();
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+ __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
+ dst,
+ double_scratch0,
scratch1,
- scratch2,
+ double_scratch1,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ b(ne, not_int32);
- // Get the result in the destination register.
- __ vmov(dst, single_scratch);
-
} else {
// Load the double value in the destination registers.
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -986,7 +1057,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@@ -998,7 +1069,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vstr(d0,
FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@@ -1217,9 +1288,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, rhs, Operand(kHeapObjectTag));
@@ -1257,8 +1328,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
@@ -1370,7 +1441,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ push(lr);
__ PrepareCallCFunction(0, 2, r5);
if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@@ -1445,8 +1516,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
__ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1535,8 +1606,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ CheckMap(object,
scratch1,
Heap::kHeapNumberMapRootIndex,
@@ -1698,9 +1769,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
__ VFPCompareAndSetFlags(d7, d6);
@@ -1818,11 +1889,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
- // This stub uses VFP3 instructions.
- CpuFeatures::Scope scope(VFP3);
-
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
+ const Register temp = map;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1875,13 +1944,56 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ VFPCompareAndSetFlags(d1, 0.0);
+    // "tos_" is a register, and contains a non-zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+ } else {
+ Label done, not_nan, not_zero;
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ // -0 maps to false:
+ __ bic(
+ temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
+ __ b(ne, &not_zero);
+ // If exponent word is zero then the answer depends on the mantissa word.
+ __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ jmp(&done);
+
+ // Check for NaN.
+ __ bind(&not_zero);
+    // We already zeroed the sign bit; now shift out the mantissa so only
+    // the exponent is left.
+ __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
+ unsigned int shifted_exponent_mask =
+ HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
+ __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+ __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
+
+ // Reload exponent word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ // Load mantissa word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ cmp(temp, Operand(0, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ __ bind(&not_nan);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
+ __ bind(&done);
+ }
__ Ret();
__ bind(&not_heap_number);
}
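
The fallback path above implements, with integer registers only, the usual
IEEE-754 bit test. A minimal C++ sketch of the same logic (illustrative, not
V8 code):

    #include <cstdint>
    #include <cstring>

    // ToBoolean for a heap-number payload without an FPU:
    // false iff the value is +0, -0 or NaN.
    bool DoubleIsTruthy(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits &= ~(1ull << 63);            // clear the sign bit, so -0 == +0
      if (bits == 0) return false;      // +/-0 maps to false
      uint64_t exponent = bits >> 52;   // 11 bits, sign already cleared
      uint64_t mantissa = bits & ((1ull << 52) - 1);
      if (exponent == 0x7FF && mantissa != 0) return false;  // NaN
      return true;
    }
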
@@ -1931,7 +2043,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -1949,7 +2061,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
@@ -2179,9 +2291,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ mov(r0, r2); // Move newly allocated heap number to r0.
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r2, r0, Operand(kHeapObjectTag));
@@ -2464,9 +2576,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
}
Register heap_number_map = r6;
@@ -2481,7 +2593,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
+ CpuFeatures::IsSupported(VFP2) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -2508,7 +2620,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Using VFP registers:
// d6: Left value
// d7: Right value
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
switch (op_) {
case Token::ADD:
__ vadd(d5, d6, d7);
@@ -2597,7 +2709,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
@@ -2636,10 +2748,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
@@ -2768,7 +2880,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register scratch1 = r7;
Register scratch2 = r9;
DwVfpRegister double_scratch = d0;
- SwVfpRegister single_scratch = s3;
Register heap_number_result = no_reg;
Register heap_number_map = r6;
@@ -2798,7 +2909,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+ (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
? FloatingPointHelper::kVFPRegisters
: FloatingPointHelper::kCoreRegisters;
@@ -2806,6 +2917,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
d7,
+ d8,
r2,
r3,
heap_number_map,
@@ -2817,6 +2929,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
d6,
+ d8,
r4,
r5,
heap_number_map,
@@ -2826,7 +2939,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -2852,10 +2965,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// transition.
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- d5,
scratch1,
- scratch2);
+ d5,
+ scratch2,
+ d8);
if (result_type_ <= BinaryOpIC::INT32) {
// If the ne condition is set, result does
@@ -2864,7 +2977,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
- __ vmov(scratch1, single_scratch);
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
// If not try to return a heap number.
__ b(mi, &return_heap_number);
@@ -2959,6 +3071,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
d0,
+ d1,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -2968,6 +3081,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
d0,
+ d1,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -2993,9 +3107,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// We only get a negative result if the shift value (r2) is 0.
 // This result cannot be represented as a signed 32-bit integer; try
// to return a heap number if we can.
- // The non vfp3 code does not support this special case, so jump to
+  // The non-VFP2 code does not support this special case, so jump to
// runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, (result_type_ <= BinaryOpIC::INT32)
? &transition
: &return_heap_number);
@@ -3030,8 +3144,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
&call_runtime);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
__ vmov(double_scratch.low(), r2);
@@ -3260,8 +3374,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (tagged) {
// Argument is a number and is on stack and in r0.
// Load argument and check if it is a smi.
@@ -3362,23 +3476,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
- // Register r0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ push(cache_entry);
+ // r0: precalculated cache entry address.
+ // r2 and r3: parts of the double value.
+  // Store r0, r2 and r3 on the stack; they are restored after the C call.
+ __ Push(r3, r2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(d2);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ pop(cache_entry);
+ __ Pop(r3, r2, cache_entry);
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
__ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@@ -3424,6 +3538,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
Isolate* isolate = masm->isolate();
__ push(lr);
@@ -3484,7 +3599,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp3_scope(VFP3);
+ CpuFeatures::Scope vfp2_scope(VFP2);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
@@ -3553,13 +3668,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label not_plus_half;
// Test for 0.5.
- __ vmov(double_scratch, 0.5);
+ __ vmov(double_scratch, 0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &not_plus_half);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vneg(double_result, double_scratch, eq);
__ b(eq, &done);
@@ -3570,20 +3685,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5);
+ __ vmov(double_scratch, -0.5, scratch);
__ VFPCompareAndSetFlags(double_exponent, double_scratch);
__ b(ne, &call_runtime);
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY);
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
__ VFPCompareAndSetFlags(double_base, double_scratch);
__ vmov(double_result, kDoubleRegZero, eq);
__ b(eq, &done);
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1);
+ __ vmov(double_result, 1.0, scratch);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
@@ -3618,7 +3733,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0);
+ __ vmov(double_result, 1.0, scratch2);
// Get absolute value of exponent.
__ cmp(scratch, Operand(0));
@@ -3634,7 +3749,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Operand(0));
__ b(ge, &done);
- __ vmov(double_scratch, 1.0);
+ __ vmov(double_scratch, 1.0, scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
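
The integer-exponent loop that follows (outside this hunk) is plain
exponentiation by squaring on the absolute exponent, with one final division
for negative powers. A C++ sketch of the scheme (illustrative only):

    // Returns base^exponent for integer exponents, inverting at the end.
    double PowInt(double base, int exponent) {
      double result = 1.0;
      double scratch = base;  // backed-up base, as in the stub
      unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      for (; n != 0; n >>= 1) {
        if (n & 1) result *= scratch;
        scratch *= scratch;
      }
      // x^-y == (1/x)^y fails near subnormals, which is why the stub
      // checks the result for zero and falls back to the runtime there.
      if (exponent < 0) result = 1.0 / result;
      return result;
    }
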
@@ -3776,9 +3891,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Compute the return address in lr to return to after the jump below. Pc is
 // already at '+ 8' from the current instruction, but the return is after
 // three instructions, so add another 4 to pc to get the return address.
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ {
+ // Prevent literal pool emission before return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ masm->add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ masm->Jump(r5);
+ }
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
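
Spelled out, with ARM reading pc as the address of the current instruction
plus 8, the blocked three-instruction sequence computes:

    addr+0:  add lr, pc, #4     ; pc reads as addr+8, so lr = addr+12
    addr+4:  str lr, [sp, #0]
    addr+8:  jump to r5         ; one instruction while the pool is blocked
    addr+12: execution resumes here, exactly the lr computed above

A literal pool dumped between these instructions would move the jump and
invalidate the '+ 4' adjustment, hence the BlockConstPoolScope.
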
@@ -3936,8 +4055,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
@@ -3952,7 +4071,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
}
__ ldr(r4, MemOperand(sp, offset_to_argv));
@@ -3995,14 +4114,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+
+ // Block literal pool emission whilst taking the position of the handler
+ // entry. This avoids making the assumption that literal pools are always
+ // emitted after an instruction is emitted, rather than before.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ }
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
@@ -4045,9 +4171,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // inserting instructions here after we read the pc. We block literal pool
+ // emission for the same reason.
+ {
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ mov(lr, Operand(pc));
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -4079,8 +4209,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
#endif
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Restore callee-saved vfp registers.
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
}
@@ -4409,14 +4539,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r0 = address of new object(s) (tagged)
// r2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into r4.
+ // Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -4589,9 +4719,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current (global) context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -4720,7 +4850,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ b(hi, &runtime);
// r2: Number of capture registers
@@ -4832,7 +4962,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
@@ -4863,27 +4993,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 8;
+ const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
- // Argument 8 (sp[16]): Pass current isolate address.
+ // Argument 9 (sp[20]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
+ __ str(r0, MemOperand(sp, 5 * kPointerSize));
- // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
+ // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
- // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
+ // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
+ __ str(r0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -4932,7 +5067,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmp(r0, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@@ -5099,10 +5236,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
__ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -5127,12 +5264,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(factory->the_hole_value()));
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// r0: JSArray, tagged.
- // r2: the hole.
+ // r2: undefined.
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
@@ -5208,7 +5345,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@@ -5900,23 +6038,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ // Arithmetic shift right by one un-smi-tags. In this case we rotate right
+ // instead because we bail out on non-smi values: ROR and ASR are equivalent
+ // for smis but they set the flags in a way that's easier to optimize.
+ __ mov(r2, Operand(r2, ROR, 1), SetCC);
+ __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then C is set now, and N
+ // has the same value: we rotated by 1, so the bottom bit is now the top bit.
// We want to bailout to runtime here if From is negative. In that case, the
// next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ // runtime.
+ // Executed if both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, cc);
+ // One of the above un-smis or the above SUB could have set N==1.
+ __ b(mi, &runtime); // Either "from" or "to" is not a smi, or from > to.
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- Condition is_string = masm->IsObjectStringType(r0, r1);
+ // Do a JumpIfSmi, but fold its jump into the subsequent string test.
+ __ tst(r0, Operand(kSmiTagMask));
+ Condition is_string = masm->IsObjectStringType(r0, r1, ne);
+ ASSERT(is_string == eq);
__ b(NegateCondition(is_string), &runtime);
// Short-cut for the case of trivial substring.
@@ -5987,7 +6130,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
@@ -6030,7 +6173,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&allocate_result);
// Sequential ascii string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
@@ -6395,9 +6538,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data);
__ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
@@ -6600,8 +6743,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Load left and right operand
__ sub(r2, r1, Operand(kHeapObjectTag));
@@ -6860,6 +7003,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
+
+ // Prevent literal pool emission during calculation of return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+
// Push return address (accessible to GC through exit frame pc).
// Note that using pc with str is deprecated.
Label start;
@@ -6970,8 +7117,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -7150,8 +7296,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+ // and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7160,12 +7306,15 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
#undef REG
+
bool RecordWriteStub::IsPregenerated() {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
@@ -7207,6 +7356,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(VFP2);
+}
+
+
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -7220,8 +7374,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// forth between a compare instruction (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
+ {
+ // Block literal pool emission, as the position of these two instructions
+ // is assumed by the patching code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+ }
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
@@ -7330,6 +7489,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
+ __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ ldr(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
+ __ str(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ b(mi, &need_incremental);
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -7414,9 +7583,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+ __ CheckFastSmiElements(r2, r5, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7428,7 +7597,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7439,8 +7608,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7450,11 +7619,73 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
+ __ StoreNumberToDoubleElements(r0, r3, r1,
+ // Overwrites all regs after this.
+ r5, r6, r7, r9, r2,
&slow_elements);
__ Ret();
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ PredictableCodeSizeScope predictable(masm);
+ ProfileEntryHookStub stub;
+ __ push(lr);
+ __ CallStub(&stub);
+ __ pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push lr" instruction, followed by a call.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ 3 * Assembler::kInstrSize;
+
+ // Save live volatile registers.
+ __ Push(lr, r5, r1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(r5, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+#if defined(V8_HOST_ARCH_ARM)
+ __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ ldr(ip, MemOperand(ip));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ mov(ip, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+#endif
+ __ Call(ip);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, r5);
+ }
+
+ __ Pop(lr, r5, r1);
+ __ Ret();
+}
+
#undef __
} } // namespace v8::internal
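
A note on the BlockConstPoolScope hunks above: on ARM, reading pc yields the address of the current instruction plus 8, so the return-address computation in CEntryStub::GenerateCore only works if nothing extra is emitted between the three instructions. A sketch of the arithmetic, not part of the patch:

    masm->add(lr, pc, Operand(4));  // lr = addr(add) + 8 + 4 = addr(add) + 12
    __ str(lr, MemOperand(sp, 0));  // emitted at addr(add) + 4
    masm->Jump(r5);                 // emitted at addr(add) + 8
                                    // addr(add) + 12: where the callee returns to

Had the assembler flushed its literal pool between these instructions, the fixed +12 offset would land inside the pool instead of on the instruction after the jump; the scope forbids exactly that.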
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
index 38ed476..3e79624 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -149,7 +149,7 @@ class BinaryOpStub: public CodeStub {
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ use_vfp2_ = CpuFeatures::IsSupported(VFP2);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -159,7 +159,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- use_vfp3_(VFP3Bits::decode(key)),
+ use_vfp2_(VFP2Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
@@ -171,7 +171,7 @@ class BinaryOpStub: public CodeStub {
Token::Value op_;
OverwriteMode mode_;
- bool use_vfp3_;
+ bool use_vfp2_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
@@ -182,7 +182,7 @@ class BinaryOpStub: public CodeStub {
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP3Bits: public BitField<bool, 9, 1> {};
+ class VFP2Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
@@ -190,7 +190,7 @@ class BinaryOpStub: public CodeStub {
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
- | VFP3Bits::encode(use_vfp3_)
+ | VFP2Bits::encode(use_vfp2_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
@@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
@@ -586,7 +586,7 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
@@ -773,6 +773,7 @@ class FloatingPointHelper : public AllStatic {
Register object,
Destination destination,
DwVfpRegister double_dst,
+ DwVfpRegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -794,7 +795,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
- DwVfpRegister double_scratch,
+ DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
Label* not_int32);
// Generate non VFP3 code to check if a double can be exactly represented by a
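
For reference, the 16-bit minor key layout "RRRTTTVOOOOOOOMM" that the renamed VFP2Bits field participates in (earlier in this file's hunks) unpacks as follows, read straight off the BitField declarations; shown only as an aid:

    // bits  0-1  MM       OverwriteMode      (ModeBits)
    // bits  2-8  OOOOOOO  Token::Value       (OpBits)
    // bit   9    V        use_vfp2_          (VFP2Bits)
    // bits 10-12 TTT      operands_type_     (OperandTypeInfoBits)
    // bits 13-15 RRR      result_type_       (ResultTypeInfoBits)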
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.cc b/src/3rdparty/v8/src/arm/codegen-arm.cc
index befd8f2..209e151 100644
--- a/src/3rdparty/v8/src/arm/codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/codegen-arm.cc
@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -107,7 +107,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+ bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
@@ -121,15 +121,34 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
- __ add(lr, lr, Operand(r5, LSL, 2));
+ // Use lr as a temporary register.
+ __ mov(lr, Operand(r5, LSL, 2));
+ __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedDoubleArray, not tagged as heap object
+ // r6: destination FixedDoubleArray, not tagged as heap object.
+
+ // Align the array conveniently for doubles.
+ // Store a filler value in the unused memory.
+ Label aligned, aligned_done;
+ __ tst(r6, Operand(kDoubleAlignmentMask));
+ __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
+ __ b(eq, &aligned);
+ // Store at the beginning of the allocated memory and update the base pointer.
+ __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
+ __ b(&aligned_done);
+
+ __ bind(&aligned);
+ // Store the filler at the end of the allocated memory.
+ __ sub(lr, lr, Operand(kPointerSize));
+ __ str(ip, MemOperand(r6, lr));
+
+ __ bind(&aligned_done);
+
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@@ -163,7 +182,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp3_supported) __ Push(r1, r0);
+ if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@@ -191,8 +210,8 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
- if (vfp3_supported) {
- CpuFeatures::Scope scope(VFP3);
+ if (vfp2_supported) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
@@ -225,7 +244,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
- if (!vfp3_supported) __ Pop(r1, r0);
+ if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
@@ -433,6 +452,92 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
+// add(r0, pc, Operand(-8))
+static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+byte* Code::FindPlatformCodeAgeSequence() {
+ byte* start = instruction_start();
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (!memcmp(start, young_sequence, young_length) ||
+ Memory::uint32_at(start) == kCodeAgePatchFirstInstruction) {
+ return start;
+ } else {
+ byte* start_after_strict = NULL;
+ if (kind() == FUNCTION) {
+ start_after_strict = start + kSizeOfFullCodegenStrictModePrologue;
+ } else {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ start_after_strict = start + kSizeOfOptimizedStrictModePrologue;
+ }
+ ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
+ Memory::uint32_at(start_after_strict) ==
+ kCodeAgePatchFirstInstruction);
+ return start_after_strict;
+ }
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ patcher.masm()->add(r0, pc, Operand(-8));
+ patcher.masm()->ldr(pc, MemOperand(pc, -4));
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
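
The aged-code sequence that PatchPlatformCodeAge emits above leans on the same pc-reads-ahead rule; a sketch of why the constants line up (assuming the usual ARM convention that reading pc yields the current instruction's address plus 8):

    patcher.masm()->add(r0, pc, Operand(-8));    // r0 = addr + 8 - 8 = start of sequence
    patcher.masm()->ldr(pc, MemOperand(pc, -4)); // at start+4: loads start+4+8-4 = start+8
    patcher.masm()->dd(stub_address);            // at start+8: stands for stub->instruction_start()

This is also why kCodeAgePatchFirstInstruction (0xe24f0008) is exactly the encoding of that first add.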
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.h b/src/3rdparty/v8/src/arm/codegen-arm.h
index c340e6b..c77844d 100644
--- a/src/3rdparty/v8/src/arm/codegen-arm.h
+++ b/src/3rdparty/v8/src/arm/codegen-arm.h
@@ -34,6 +34,9 @@
namespace v8 {
namespace internal {
+static const int kSizeOfFullCodegenStrictModePrologue = 16;
+static const int kSizeOfOptimizedStrictModePrologue = 16;
+
// Forward declarations
class CompilationInfo;
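
These two 16-byte constants match the strict-mode receiver-patching prologue measured by the ASSERT_EQ in full-codegen-arm.cc later in this patch: at 4 bytes per ARM instruction they cover the four-instruction sequence below (a cross-check, assuming LoadRoot emits a single ldr here):

    __ cmp(r5, Operand(0));                          //  4 bytes
    __ b(eq, &ok);                                   //  8 bytes
    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); // 12 bytes
    __ str(r2, MemOperand(sp, receiver_offset));     // 16 bytes == kSizeOfFullCodegenStrictModePrologue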
diff --git a/src/3rdparty/v8/src/arm/constants-arm.h b/src/3rdparty/v8/src/arm/constants-arm.h
index e767001..03876f9 100644
--- a/src/3rdparty/v8/src/arm/constants-arm.h
+++ b/src/3rdparty/v8/src/arm/constants-arm.h
@@ -29,14 +29,14 @@
#define V8_ARM_CONSTANTS_ARM_H_
// ARM EABI is required.
-#if defined(__arm__) && !defined(__ARM_EABI__)
+#if defined(__arm__) && !defined(__ARM_EABI__) && !defined(_WIN32_WCE)
#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
// want to generate them on the simulator too so it makes snapshots that can
// be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
+#if defined(__THUMB_INTERWORK__) || !defined(__arm__) || defined(_WIN32_WCE)
# define USE_THUMB_INTERWORK 1
#endif
@@ -56,16 +56,19 @@
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Simulator should support ARM5 instructions and unaligned access by default.
-#if !defined(__arm__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
+#if !defined(__arm__) || defined(_WIN32_WCE)
+# if !defined(_WIN32_WCE)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# endif
# define CAN_USE_THUMB_INSTRUCTIONS 1
# ifndef CAN_USE_UNALIGNED_ACCESSES
@@ -74,10 +77,6 @@
#endif
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
@@ -87,9 +86,18 @@ namespace v8 {
namespace internal {
// Constant pool marker.
-const int kConstantPoolMarkerMask = 0xffe00000;
-const int kConstantPoolMarker = 0x0c000000;
-const int kConstantPoolLengthMask = 0x001ffff;
+// Use UDF, the permanently undefined instruction.
+const int kConstantPoolMarkerMask = 0xfff000f0;
+const int kConstantPoolMarker = 0xe7f000f0;
+const int kConstantPoolLengthMaxMask = 0xffff;
+inline int EncodeConstantPoolLength(int length) {
+ ASSERT((length & kConstantPoolLengthMaxMask) == length);
+ return ((length & 0xfff0) << 4) | (length & 0xf);
+}
+inline int DecodeConstantPoolLength(int instr) {
+ ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+ return ((instr >> 4) & 0xfff0) | (instr & 0xf);
+}
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
@@ -690,6 +698,9 @@ class Instruction {
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
+ // Test for a nop instruction, which falls under type 1.
+ inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
// Test for a stop instruction.
inline bool IsStop() const {
return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
diff --git a/src/3rdparty/v8/src/arm/cpu-arm.cc b/src/3rdparty/v8/src/arm/cpu-arm.cc
index f7da6c3..bed9503 100644
--- a/src/3rdparty/v8/src/arm/cpu-arm.cc
+++ b/src/3rdparty/v8/src/arm/cpu-arm.cc
@@ -29,7 +29,7 @@
#include "v8.h"
-#if defined(__arm__)
+#if defined(__arm__) && !defined(_WIN32_WCE)
#if !defined(__QNXNTO__)
#include <sys/syscall.h> // for cache flushing.
#else
@@ -73,6 +73,11 @@ void CPU::FlushICache(void* start, size_t size) {
// The QNX kernel does not expose the symbol __ARM_NR_cacheflush so we
// use the msync system call instead of the approach used on Linux
msync(start, size, MS_SYNC|MS_INVALIDATE_ICACHE);
+#elif defined(_WIN32_WCE)
+ // The Windows CE compiler does not support the asm command, nor does it
+ // expose __ARM_NR_cacheflush. In addition, Windows CE cannot flush a
+ // single region, so we flush the whole process's instruction cache.
+ FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
diff --git a/src/3rdparty/v8/src/arm/debug-arm.cc b/src/3rdparty/v8/src/arm/debug-arm.cc
index 3e7a1e9..c2941be 100644
--- a/src/3rdparty/v8/src/arm/debug-arm.cc
+++ b/src/3rdparty/v8/src/arm/debug-arm.cc
@@ -48,7 +48,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
- // #if USE_BLX
+ // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
@@ -99,7 +99,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
- // #if USE_BLX
+ // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
diff --git a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
index 699e6aa..19667b9 100644
--- a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
+++ b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
@@ -50,6 +50,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -69,8 +73,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ // We need calls to have a predictable size in the unoptimized code, but
+ // this is optimized code, so we don't have to have a predictable size.
+ int call_size_in_bytes =
+ MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+ RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
@@ -97,8 +104,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -196,11 +202,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -219,7 +225,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -239,9 +245,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -352,8 +358,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -577,19 +583,145 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
+ // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
+ // need one additional entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context is taken from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
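
For orientation, the INTERNAL frame that DoComputeAccessorStubFrame lays out above ends up like this in the getter case (offsets read off the hunk; the setter frame carries one extra slot below the code object for the implicit return value):

    // top + 16 : caller's pc
    // top + 12 : caller's fp        <- this frame's fp
    // top +  8 : context
    // top +  4 : StackFrame::INTERNAL marker (Smi)
    // top +  0 : accessor stub code object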
diff --git a/src/3rdparty/v8/src/arm/disasm-arm.cc b/src/3rdparty/v8/src/arm/disasm-arm.cc
index 96a7d3c..af2ed52 100644
--- a/src/3rdparty/v8/src/arm/disasm-arm.cc
+++ b/src/3rdparty/v8/src/arm/disasm-arm.cc
@@ -692,11 +692,19 @@ void Decoder::DecodeType01(Instruction* instr) {
// Rn field to encode it.
Format(instr, "mul'cond's 'rn, 'rm, 'rs");
} else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ } else {
+ // The MLS instruction description (A 4.1.29) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ }
}
} else {
// The signed/long multiply instructions use the terms RdHi and RdLo
@@ -822,6 +830,8 @@ void Decoder::DecodeType01(Instruction* instr) {
} else {
Unknown(instr); // not used by V8
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ Format(instr, "nop'cond");
} else {
switch (instr->OpcodeField()) {
case AND: {
@@ -974,6 +984,17 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
case db_x: {
+ if (FLAG_enable_sudiv) {
+ if (!instr->HasW()) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+ Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
+ break;
+ }
+ }
+ }
+ }
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
break;
}
@@ -1367,7 +1388,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return instruction_bits & kConstantPoolLengthMask;
+ return DecodeConstantPoolLength(instruction_bits);
} else {
return -1;
}
@@ -1389,8 +1410,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
- instruction_bits &
- kConstantPoolLengthMask);
+ DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
index 2555c04..9a7b116 100644
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
@@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED {
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
- // Don't use b(al, ...) as that might emit the constant pool right after the
- // branch. After patching when the branch is no longer unconditional
- // execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
@@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
+ // Block literal pool emission whilst recording patch site information.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
@@ -135,6 +134,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -148,12 +149,15 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
+ Label begin;
+ __ bind(&begin);
__ cmp(r5, Operand(0));
__ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
+ ASSERT_EQ(kSizeOfFullCodegenStrictModePrologue, ok.pos() - begin.pos());
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -163,12 +167,12 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
+ // The following four instructions must remain together and unmodified for
+ // code aging to work properly.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize));
@@ -184,11 +188,14 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
(scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
+ // Argument to NewContext is the function, which is still in r1.
+ Comment cmnt(masm_, "[ Allocate context");
__ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -264,7 +271,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -279,11 +286,12 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -330,7 +338,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
- reset_value = 10;
+ reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
@@ -338,13 +346,11 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
+ // Block literal pools whilst emitting stack check code.
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
if (FLAG_count_based_interrupts) {
@@ -353,7 +359,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
@@ -363,6 +369,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
}
@@ -405,7 +412,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -436,6 +443,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -675,18 +683,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP3)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ tst(result_register(), result_register());
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -787,7 +786,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
@@ -809,11 +808,12 @@ void FullCodeGenerator::VisitVariableDeclaration(
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ : isolate()->factory()->undefined_value(),
+ zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
case Variable::PARAMETER:
@@ -840,10 +840,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -869,13 +868,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(function, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
}
@@ -929,9 +928,9 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
Visit(declaration->module());
break;
}
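Each globals_->Add() in the declaration visitors above now carries an explicit Zone, so the list's backing store grows inside the compilation's own arena. A minimal sketch of that container pattern, assuming a ZoneList-style interface (simplified; not the actual V8 declaration, and restricted to trivially copyable T):

    #include <cstddef>

    struct Zone {  // stand-in arena; a real Zone releases all blocks at once
      void* Allocate(size_t size) { return ::operator new(size); }
    };

    template <typename T>
    class ZoneList {
     public:
      void Add(const T& element, Zone* zone) {
        if (length_ == capacity_) Grow(zone);
        data_[length_++] = element;
      }

     private:
      void Grow(Zone* zone) {
        int new_capacity = capacity_ == 0 ? 4 : 2 * capacity_;
        T* new_data = static_cast<T*>(zone->Allocate(new_capacity * sizeof(T)));
        for (int i = 0; i < length_; ++i) new_data[i] = data_[i];
        data_ = new_data;  // the old block stays in the zone until it dies
        capacity_ = new_capacity;
      }
      T* data_ = nullptr;
      int length_ = 0;
      int capacity_ = 0;
    };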
@@ -1135,26 +1134,34 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ mov(r2, r0);
- __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
+ __ cmp(r2, ip);
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
- __ LoadInstanceDescriptors(r0, r1);
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
- __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ EnumLength(r1, r0);
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ b(eq, &no_descriptors);
+
+ __ LoadInstanceDescriptors(r0, r2);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(r0); // Map.
- __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1163,7 +1170,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
@@ -1319,9 +1326,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Move(next, current);
}
__ bind(&loop);
- // Terminate at global context.
+ // Terminate at native context.
__ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
// Check that extension is NULL.
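The loop this hunk patches walks the context chain outward, now stopping at the native (formerly "global") context and bailing to the slow path if any intervening context has an extension object. The same walk in plain C++ over a hypothetical context type, for illustration only:

    struct Context {
      Context* previous;
      bool is_native;      // map == native_context_map
      bool has_extension;  // non-NULL extension slot ('with', eval, etc.)
    };

    bool CanLoadGlobalDirectly(Context* c) {
      while (!c->is_native) {                // terminate at the native context
        if (c->has_extension) return false;  // must take the slow path
        c = c->previous;
      }
      return true;                           // fast global load is safe
    }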
@@ -1607,9 +1614,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1635,7 +1642,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1705,7 +1712,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1726,8 +1733,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(constant_elements_kind == FAST_ELEMENTS ||
- constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1755,7 +1761,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (constant_elements_kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
@@ -1842,11 +1848,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1903,7 +1909,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1911,7 +1917,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1938,7 +1944,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2021,7 +2028,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -2152,7 +2160,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2185,43 +2193,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- // Load receiver to r1. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r1, MemOperand(sp));
- } else {
- __ pop(r1);
- }
+ __ pop(r1);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -2230,44 +2211,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ pop(r1); // Key.
- // Load receiver to r2. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ ldr(r2, MemOperand(sp));
- } else {
- __ pop(r2);
- }
+ __ pop(r2);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ ldr(ip, MemOperand(sp, kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- __ Drop(1);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -2280,6 +2233,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
@@ -2293,9 +2247,11 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
- __ Call(code, rmode, ast_id);
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -2315,7 +2271,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2348,7 +2304,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2368,16 +2324,14 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(r2, Operand(cell));
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2571,21 +2525,15 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(r2, Operand(cell));
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
@@ -2727,7 +2675,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(r0);
+ __ AssertNotSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
@@ -2742,27 +2690,31 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ b(eq, if_false);
// Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(r3, r1);
+ __ cmp(r3, Operand(0));
+ __ b(eq, &done);
+
__ LoadInstanceDescriptors(r1, r4);
- __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: descriptor array
- // r3: length of descriptor array
- // Calculate the end of the descriptor array.
+ // r4: descriptor array.
+ // r3: valid entries in the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kPointerSize == 4);
- __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
+ __ mul(r3, r3, ip);
+ // Calculate location of the first key name.
+ __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ // Calculate the end of the descriptor array.
+ __ mov(r2, r4);
__ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- // Calculate location of the first key name.
- __ add(r4,
- r4,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf, the result is false.
- Label entry, loop;
// The use of ip to store the valueOf symbol assumes that it is not otherwise
// used in the loop below.
__ mov(ip, Operand(FACTORY->value_of_symbol()));
@@ -2771,18 +2723,19 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, MemOperand(r4, 0));
__ cmp(r3, ip);
__ b(eq, if_false);
- __ add(r4, r4, Operand(kPointerSize));
+ __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(r4, Operand(r2));
__ b(ne, &loop);
- // If a valueOf property is not found on the object check that it's
+ __ bind(&done);
+ // If a valueOf property is not found on the object check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
__ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
__ b(ne, if_false);
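The rewritten scan above visits only the map's own descriptors (NumberOfOwnDescriptors), stepping DescriptorArray::kDescriptorSize slots per entry rather than one pointer at a time. Reduced to plain C++ with invented types, the loop is:

    struct Name;  // stand-in for a V8 symbol pointer

    // keys: first key slot; valid: number of own descriptors;
    // stride: pointer slots per descriptor entry.
    bool MightHaveValueOf(Name** keys, int valid, int stride,
                          Name* value_of_symbol) {
      Name** end = keys + valid * stride;
      for (Name** p = keys; p != end; p += stride) {
        if (*p == value_of_symbol) return true;  // may be a false positive
      }
      return false;
    }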
@@ -3059,13 +3012,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
__ PrepareCallCFunction(1, r0);
- __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
@@ -3082,9 +3036,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
- __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+ __ ldr(r1,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -3146,20 +3101,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+ __ b(ne, &not_date_object);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3176,8 +3130,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(r0);
}
@@ -3188,7 +3146,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
@@ -3440,10 +3398,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
- __ b(eq, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ b(ne, &runtime);
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
@@ -3453,7 +3412,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(r0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3481,7 +3440,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3493,8 +3452,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = r0;
Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ ldr(cache,
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
@@ -3591,9 +3550,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(r0);
- }
+ __ AssertString(r0);
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
@@ -3665,7 +3622,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(array_length, Operand(0));
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3863,7 +3820,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -4018,7 +3975,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(r0);
}
@@ -4076,7 +4034,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4129,7 +4087,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4161,7 +4119,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4178,7 +4136,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4387,7 +4345,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
@@ -4471,7 +4429,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4501,14 +4459,57 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
STATIC_ASSERT(kSmiTag == 0);
__ add(r1, r1, Operand(r1)); // Convert to smi.
+
+ // Store result register while executing finally block.
+ __ push(r1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ ldr(r1, MemOperand(ip));
+ __ push(r1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ ldr(r1, MemOperand(ip));
+ __ SmiTag(r1);
+ __ push(r1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ ldr(r1, MemOperand(ip));
__ push(r1);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
+ // Restore pending message from stack.
+ __ pop(r1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ str(r1, MemOperand(ip));
+
+ __ pop(r1);
+ __ SmiUntag(r1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ str(r1, MemOperand(ip));
+
+ __ pop(r1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ str(r1, MemOperand(ip));
+
// Restore result register from stack.
__ pop(r1);
+
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
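EnterFinallyBlock and ExitFinallyBlock above must stay exact mirror images: the result register and the three pending-message values are pushed in one order and popped in the reverse order. (Note also that `add r1, r1, r1` doubles the value, a left shift by one, which is precisely the smi-tagging step.) A toy C++ illustration of the LIFO discipline, not V8 code:

    #include <cassert>
    #include <cstring>
    #include <stack>

    int main() {
      std::stack<const char*> frame;  // stands in for the machine stack
      // EnterFinallyBlock order:
      frame.push("result");
      frame.push("pending_message_obj");
      frame.push("has_pending_message");
      frame.push("pending_message_script");
      // ExitFinallyBlock pops in exactly the reverse order:
      assert(strcmp(frame.top(), "pending_message_script") == 0); frame.pop();
      assert(strcmp(frame.top(), "has_pending_message") == 0);    frame.pop();
      assert(strcmp(frame.top(), "pending_message_obj") == 0);    frame.pop();
      assert(strcmp(frame.top(), "result") == 0);                 frame.pop();
      return 0;
    }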
diff --git a/src/3rdparty/v8/src/arm/ic-arm.cc b/src/3rdparty/v8/src/arm/ic-arm.cc
index c12c167..4839589 100644
--- a/src/3rdparty/v8/src/arm/ic-arm.cc
+++ b/src/3rdparty/v8/src/arm/ic-arm.cc
@@ -396,7 +396,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1301,6 +1301,144 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch_value = r4;
+ Register address = r5;
+ if (check_map == kCheckMap) {
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, fast_double);
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
+ // Update write barrier for the elements array address.
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+ __ b(ne, slow);
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements, // Overwritten.
+ r3, // Scratch regs...
+ r4,
+ r5,
+ r6,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ r4,
+ slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@@ -1309,11 +1447,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Register usage.
Register value = r0;
@@ -1348,7 +1484,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
- __ b(lo, &fast_object_with_map_check);
+ __ b(lo, &fast_object);
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1373,21 +1509,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &check_if_double_array);
- // Calculate key + 1 as smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast_object_without_map_check);
+ __ jmp(&fast_object_grow);
__ bind(&check_if_double_array);
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -1399,106 +1527,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &extra);
- // Fall through to fast case.
-
- __ bind(&fast_object_with_map_check);
- Register scratch_value = r4;
- Register address = r5;
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- r3,
- r4,
- r5,
- r6,
- &transition_double_elements);
- __ Ret();
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- &slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
}
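The refactoring above folds two nearly identical fast paths into KeyedStoreGenerateGenericHelper, instantiated twice: once for in-bounds stores (check the elements map, leave the length) and once for the grow-on-store case (map already validated, bump the length). Its skeleton, with the enum names from the hunk and everything else simplified:

    enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
    enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };

    // Skeleton only; the real helper emits ARM assembly, not C++.
    void StoreFastPath(KeyedStoreCheckMap check_map,
                       KeyedStoreIncrementLength increment_length) {
      if (check_map == kCheckMap) {
        // Dispatch between the FixedArray and FixedDoubleArray cases.
      }
      if (increment_length == kIncrementLength) {
        // receiver->length = key + 1: stores one past the end grow the array.
      }
      // ...common store, write barrier, and elements-kind transitions...
    }

    void GenerateGeneric() {
      StoreFastPath(kCheckMap, kDontIncrementLength);  // in-bounds store
      StoreFastPath(kDontCheckMap, kIncrementLength);  // grow-on-store
    }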
@@ -1697,7 +1734,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address cmp_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
+ Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
index a679c0c..b492d48 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-arm.cc
@@ -177,6 +177,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -194,22 +195,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -218,57 +219,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -278,7 +279,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -292,26 +293,26 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -340,17 +341,15 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
-
stream->Add(" length ");
length()->PrintTo(stream);
-
stream->Add(" index ");
index()->PrintTo(stream);
}
@@ -374,16 +373,7 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -407,146 +397,26 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
// Skip an extra slot when allocating a double-width slot.
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
- } else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
+ return LStackSlot::Create(index, zone());
}
}
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -561,17 +431,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -740,7 +601,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -762,7 +623,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -835,13 +696,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
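The rewritten condition above keeps the old rule that a logical shift right by zero may deoptimize, but lets FLAG_opt_safe_uint32_operations skip the use scan when the instruction is flagged kUint32. The reason SHR by 0 is special: it is the one shift whose result can exceed INT32_MAX. A concrete instance:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t x = 0xFFFFFFFFu;  // as a JS number: 4294967295
      uint32_t r = x >> 0;       // JS `x >>> 0`: the value is unchanged
      // r exceeds INT32_MAX, so it has no int32 representation; any use
      // that does not truncate to int32 would force a deoptimization.
      std::printf("%u > %d\n", r, INT32_MAX);
      return 0;
    }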
@@ -974,8 +838,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -985,7 +849,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->parameter_count(),
argument_count_,
value_count,
- outer);
+ outer,
+ hydrogen_env->entry(),
+ zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -999,7 +865,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1164,7 +1032,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+
+ LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
@@ -1231,6 +1100,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
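DoRor above routes the new Token::ROR through the shared DoShift lowering (its "ror-t" mnemonic was added earlier in this patch). For reference, the 32-bit rotate-right the token denotes, written out as a sketch:

    #include <cstdint>

    uint32_t RotateRight(uint32_t value, unsigned shift) {
      shift &= 31;  // ARM's ROR takes the rotate amount mod 32
      if (shift == 0) return value;
      return (value >> shift) | (value << (32 - shift));
    }
    // Example: RotateRight(0x80000001u, 1) == 0xC0000000u.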
@@ -1344,7 +1218,8 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
- return constant_val->CopyToRepresentation(Representation::Integer32());
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
}
}
return NULL;
@@ -1360,7 +1235,7 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
return AssignEnvironment(DefineAsRegister(
- new LMathFloorOfDiv(dividend, divisor, remainder)));
+ new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
@@ -1477,6 +1352,25 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
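
LeastConstantOperand()/MostConstantOperand() steer any constant into the right-hand slot, where UseOrConstantAtStart can encode it without a register. A simplified sketch of that ordering choice (hypothetical helper, not the V8 implementation):

    // Prefer the constant on the right so the backend can try to fold
    // it into an ARM Operand2 immediate instead of burning a register.
    struct OperandPair { HValue* reg_side; HValue* imm_side; };

    OperandPair OrderForImmediate(HValue* a, HValue* b) {
      if (a->IsConstant() && !b->IsConstant()) {
        OperandPair swapped = { b, a };
        return swapped;
      }
      OperandPair in_order = { a, b };
      return in_order;
    }
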
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1642,6 +1536,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1657,13 +1557,14 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(r1), instr->index());
+ return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterAtStart(instr->index());
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@@ -1706,16 +1607,14 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegisterAtStart(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+ if (instr->value()->type().IsSmi()) {
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
- : NULL;
+ LOperand* temp3 = FixedTemp(d11);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
@@ -1748,7 +1647,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LNumberTagU* result = new(zone()) LNumberTagU(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -1756,8 +1658,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else {
ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
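
The kUint32 split matters because the same 32-bit pattern converts to a different double depending on signedness; both conversions are exact, since a double's 53-bit mantissa covers every 32-bit integer. Sketch:

    #include <stdint.h>

    // Identical bits, different doubles: the builder must pick the
    // instruction (vcvt signed vs. unsigned on ARM) from the kUint32
    // flag, not from the bits themselves.
    double AsSigned(uint32_t bits) {
      return static_cast<double>(static_cast<int32_t>(bits));
    }
    double AsUnsigned(uint32_t bits) {
      return static_cast<double>(bits);
    }
    // AsSigned(0xFFFFFFFFu) == -1.0; AsUnsigned(0xFFFFFFFFu) == 4294967295.0
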
@@ -1779,10 +1686,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
+ LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(Define(result, temp1));
}
@@ -1950,50 +1857,41 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* elements = UseTempRegister(instr->elements());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseTempRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
  // An unsigned int array load might overflow and cause a deopt; make sure it
  // has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
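
Both deopt sources the merged path now checks are value-dependent. For the fast-double case, "the hole" is a NaN with a reserved upper word; a sketch of the test the generated load performs (constant as in V8's objects.h of this era):

    #include <stdint.h>

    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // reserved NaN payload

    // A holey double-array load must detect the hole and deopt rather
    // than hand user code the raw sentinel bit pattern.
    bool IsTheHole(uint64_t double_bits) {
      return static_cast<uint32_t>(double_bits >> 32) == kHoleNanUpper32;
    }
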
@@ -2007,63 +1905,37 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
+ LOperand* key;
+ LOperand* val;
+ if (instr->NeedsWriteBarrier()) {
+ key = UseTempRegister(instr->key());
+ val = UseTempRegister(instr->value());
+ } else {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ val = UseRegisterAtStart(instr->value());
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
+#ifdef DEBUG
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ } else {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ }
+#endif
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
+ ASSERT(result != NULL);
+ return result;
}
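
The UseTempRegister choices above exist because the write barrier clobbers the registers carrying the slot address and value. A self-contained model of what the barrier records (simplified; the real one is the macro assembler's RecordWrite):

    #include <vector>

    // Generational write barrier, boiled down: remember old-to-new
    // pointers so the next scavenge can treat them as roots. The
    // generated code scribbles over its scratch registers while doing
    // this, hence key and value must be allocated as temps.
    struct MiniHeap {
      std::vector<void**> store_buffer_;
      bool InNewSpace(const void* p) const {
        return false;  // stand-in for the real address-range test
      }
      void RecordWrite(void** slot, void* value) {
        if (InNewSpace(value) && !InNewSpace(slot)) {
          store_buffer_.push_back(slot);
        }
      }
    };
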
@@ -2082,8 +1954,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ ElementsKind from_kind = instr->original_map()->elements_kind();
+ ElementsKind to_kind = instr->transitioned_map()->elements_kind();
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
@@ -2104,16 +1977,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = instr->is_in_object()
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- return new(zone()) LStoreNamedField(obj, val);
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2156,7 +2041,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
@@ -2195,6 +2081,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2233,12 +2120,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2292,7 +2177,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2318,10 +2203,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2333,7 +2219,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* env = current_block_->last_environment();
- if (instr->arguments_pushed()) {
+ if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
@@ -2364,8 +2250,7 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
}
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
index 7e94f88..2c289dd 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ b/src/3rdparty/v8/src/arm/lithium-arm.h
@@ -108,6 +108,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +116,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -125,18 +125,19 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -154,15 +155,14 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -257,11 +257,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -270,6 +265,15 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -289,16 +293,17 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
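
Moving the virtuals behind friend declarations means only the operand iterators touch the raw count/at interface; every other caller goes through the typed accessors this patch adds to each instruction. The iterator shape, simplified (V8's real iterators also skip NULL operands):

    class TempIterator {
     public:
      explicit TempIterator(LInstruction* instr)
          : instr_(instr), limit_(instr->TempCount()), current_(0) { }
      bool Done() const { return current_ >= limit_; }
      LOperand* Current() const { return instr_->TempAt(current_); }
      void Advance() { ++current_; }

     private:
      LInstruction* instr_;
      int limit_;
      int current_;
    };
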
@@ -333,8 +338,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
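
The recurring new(zone()) spelling is placement new against a bump allocator: every LIR object is carved out of the compilation Zone and reclaimed wholesale when compilation ends, never deleted individually. Roughly (condensed from V8's zone.h, bookkeeping elided):

    #include <stddef.h>

    class Zone {
     public:
      void* New(size_t size) {
        // Align, and refill from a fresh segment when exhausted (elided).
        char* result = position_;
        position_ += size;
        return result;
      }
     private:
      char* position_;
    };

    inline void* operator new(size_t size, Zone* zone) {
      return zone->New(size);
    }
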
@@ -514,6 +521,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -540,16 +549,22 @@ class LModI: public LTemplateInstruction<1, 2, 3> {
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -562,6 +577,9 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -577,6 +595,10 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
@@ -590,6 +612,10 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -602,6 +628,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
@@ -621,6 +650,9 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -636,6 +668,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
@@ -648,6 +683,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -660,6 +697,8 @@ class LIsNilAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -677,6 +716,9 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
@@ -691,6 +733,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
@@ -704,6 +749,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -718,6 +765,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
@@ -733,6 +783,9 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -749,6 +802,8 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -763,6 +818,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -774,6 +831,8 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
@@ -789,6 +848,9 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -804,6 +866,9 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -818,6 +883,9 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -829,6 +897,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -857,6 +928,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -867,6 +939,9 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -883,7 +958,8 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -901,6 +977,9 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -939,6 +1018,8 @@ class LBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -953,6 +1034,9 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -974,6 +1058,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -985,18 +1071,34 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -1009,6 +1111,9 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1021,40 +1126,26 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Smi* index() const { return index_; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
- Smi* index() const { return index_; }
private:
Smi* index_;
};
-class LSetDateField: public LTemplateInstruction<1, 2, 1> {
- public:
- LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
- : index_(index) {
- inputs_[0] = date;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- int index() const { return index_; }
-
- private:
- int index_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1065,6 +1156,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1076,11 +1169,29 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1088,6 +1199,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1099,6 +1213,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1113,6 +1229,8 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
@@ -1131,12 +1249,14 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ Token::Value op() const { return op_; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
- Token::Value op() const { return op_; }
-
private:
Token::Value op_;
};
@@ -1148,6 +1268,8 @@ class LReturn: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1158,6 +1280,8 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
@@ -1169,10 +1293,10 @@ class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
};
@@ -1182,10 +1306,11 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1196,10 +1321,10 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
};
@@ -1209,6 +1334,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1219,75 +1346,47 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LLoadKeyedGeneric(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1304,10 +1403,11 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1320,10 +1420,11 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1335,12 +1436,13 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* global_object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1351,10 +1453,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1368,11 +1471,12 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1385,6 +1489,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1421,9 +1527,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1443,8 +1549,9 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
- LOperand* context() { return InputAt(0); }
+ LOperand* context() { return inputs_[0]; }
bool qml_global() { return qml_global_; }
+
private:
bool qml_global_;
};
@@ -1456,9 +1563,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global_object() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1480,11 +1587,11 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1498,6 +1605,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = key;
}
+ LOperand* key() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1526,10 +1635,11 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* function() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1570,6 +1680,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = constructor;
}
+ LOperand* constructor() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1595,28 +1707,60 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagU(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1624,12 +1768,16 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
- LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1641,15 +1789,20 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
LTaggedToI(LOperand* value,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1663,6 +1816,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1673,6 +1828,8 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1685,30 +1842,33 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1718,109 +1878,67 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedGeneric(LOperand* object, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
+ bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1828,21 +1946,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1855,11 +1974,11 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1871,11 +1990,11 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1885,10 +2004,10 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = char_code;
}
+ LOperand* char_code() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
@@ -1898,10 +2017,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1911,7 +2030,7 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
@@ -1924,6 +2043,8 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1935,18 +2056,23 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -1961,6 +2087,8 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
@@ -1971,18 +2099,21 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
@@ -1990,8 +2121,8 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -2002,12 +2133,13 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2015,11 +2147,14 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
- LAllocateObject(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LAllocateObject(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2068,6 +2203,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2079,6 +2216,8 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2089,6 +2228,8 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2104,6 +2245,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
@@ -2111,15 +2254,15 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LDeleteProperty(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
@@ -2229,63 +2372,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2295,7 +2388,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->isolate()->zone()),
+ zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2304,10 +2397,10 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
@@ -2326,7 +2419,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2336,7 +2429,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2426,7 +2519,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2438,7 +2531,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
index 90060a9..67773ee 100644
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
@@ -91,17 +91,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -127,6 +118,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -145,15 +138,23 @@ bool LCodeGen::GeneratePrologue() {
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
Label ok;
+ Label begin;
+ __ bind(&begin);
__ cmp(r5, Operand(0));
__ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
+ // ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos());
}
+ // The following three instructions must remain together and unmodified for
+ // code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Add an unused load of ip to ensure the prologue sequence is identical
+ // for full-codegen and lithium-codegen.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
@@ -323,7 +324,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -361,7 +363,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -387,9 +390,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
@@ -399,33 +402,33 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
- return Operand(literal);
+ return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -470,7 +473,9 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -478,8 +483,21 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ // Function parameters are arguments to the outermost environment. The
+ // arguments index points to the first element of a sequence of tagged
+ // values on the stack that represent the arguments. This needs to be
+ // kept in sync with the LArgumentsElements implementation.
+ *arguments_index = -environment->parameter_count();
+ *arguments_count = environment->parameter_count();
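+ // For instance, an outermost frame with three parameters starts out
+ // with *arguments_index == -3 and *arguments_count == 3.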
+
+ WriteTranslation(environment->outer(),
+ translation,
+ arguments_index,
+ arguments_count);
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
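+ // The function being compiled is referenced as kSelfLiteralId rather
+ // than occupying a slot in the deoptimization literal array.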
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -487,12 +505,31 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
+
+ // Inlined frames which push their arguments cause the index to be
+ // bumped and a new stack area to be used for materialization.
+ if (environment->entry() != NULL &&
+ environment->entry()->arguments_pushed()) {
+ *arguments_index = *arguments_index < 0
+ ? GetStackSlotCount()
+ : *arguments_index + *arguments_count;
+ *arguments_count = environment->entry()->arguments_count() + 1;
+ }
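+ // In other words, the first frame that pushes arguments switches from
+ // the incoming parameter area (negative indices) to a fresh area above
+ // the spill slots; later frames are laid out one after another.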
+
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -503,7 +540,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -511,26 +551,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
- false);
+ false,
+ false,
+ *arguments_index,
+ *arguments_count);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed by the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject();
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -544,6 +597,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -551,8 +606,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -562,19 +617,24 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- SafepointMode safepoint_mode) {
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode) {
ASSERT(instr != NULL);
+ // Block literal pool emission to ensure the nop indicating no inlined
+ // smi code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- __ Call(code, mode);
+ __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
@@ -626,20 +686,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
+ int args_index = 0;
+ int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
- WriteTranslation(environment, &translation);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -671,7 +733,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry));
+ deopt_jump_table_.Add(JumpTableEntry(entry), zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
@@ -695,13 +757,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -716,7 +778,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -762,14 +824,14 @@ void LCodeGen::RecordSafepoint(
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ safepoint.DefinePointerRegister(cp, zone());
}
}
@@ -781,7 +843,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -900,7 +962,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
+ Register dividend = ToRegister(instr->left());
Register result = ToRegister(instr->result());
int32_t divisor =
@@ -925,112 +987,135 @@ void LCodeGen::DoModI(LModI* instr) {
}
// These registers hold untagged 32-bit values.
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
Register result = ToRegister(instr->result());
+ Label done;
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
- DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatures::Scope scope(SUDIV);
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
- Label done, vfp_modulo, both_positive, right_negative;
+ // For r3 = r1 % r2; we can have the following ARM code
+ // sdiv r3, r1, r2
+ // mls r3, r3, r2, r1
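+ // where mls computes r3 = r1 - r3 * r2, i.e. left - (left / right) * right,
+ // leaving a remainder with the sign of the dividend.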
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ sdiv(result, left, right);
+ __ mls(result, result, right, left);
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
- __ Move(result, left);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
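+ // A zero remainder with a negative dividend stands for -0.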
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(lt, instr->environment());
+ }
+ } else {
+ Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
+ DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
+ DwVfpRegister quotient = double_scratch0();
+
+ ASSERT(!dividend.is(divisor));
+ ASSERT(!dividend.is(quotient));
+ ASSERT(!divisor.is(quotient));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+ ASSERT(!scratch.is(result));
+
+ Label vfp_modulo, both_positive, right_negative;
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand(0));
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
+ __ Move(result, left);
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
+ // (0 % x) must yield 0 (if x is finite, which is the case here).
+ __ cmp(left, Operand(0));
+ __ b(eq, &done);
+ // Preload right in a vfp register.
+ __ vmov(divisor.low(), right);
+ __ b(lt, &vfp_modulo);
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand(0));
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
+ __ cmp(left, Operand(right));
__ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value is preloaded before. Be careful that 'right' is only live
- // on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated (for example
- // to scratch2).
- right = no_reg;
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
-
- // We do not care about the sign of the divisor.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand(0));
- DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
- }
+ // Check for (positive) power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
+ scratch,
+ &right_negative,
+ &both_positive);
+ // Perform modulo operation (scratch contains right - 1).
+ __ and_(result, scratch, Operand(left));
+ __ b(&done);
+
+ __ bind(&right_negative);
+ // Negate right. The sign of the divisor does not matter.
+ __ rsb(right, right, Operand(0));
+
+ __ bind(&both_positive);
+ const int kUnfolds = 3;
+ // If the right hand side is smaller than the (nonnegative)
+ // left hand side, the left hand side is the result.
+ // Else try a few subtractions of the right hand side from it.
+ __ mov(scratch, left);
+ for (int i = 0; i < kUnfolds; i++) {
+ // Check if the left hand side is less than or equal to the
+ // right hand side.
+ __ cmp(scratch, Operand(right));
+ __ mov(result, scratch, LeaveCC, lt);
+ __ b(lt, &done);
+ // If not, reduce the left hand side by the right hand
+ // side and check again.
+ if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
+ }
+
+ __ bind(&vfp_modulo);
+ // Load the arguments in VFP registers.
+ // The divisor value was preloaded above. Note that 'right' is only
+ // live on entry.
+ __ vmov(dividend.low(), left);
+ // From here on don't use right as it may have been reallocated
+ // (for example to scratch2).
+ right = no_reg;
+
+ __ vcvt_f64_s32(dividend, dividend.low());
+ __ vcvt_f64_s32(divisor, divisor.low());
+
+ // We do not care about the sign of the divisor.
+ __ vabs(divisor, divisor);
+ // Compute the quotient and round it to a 32-bit integer.
+ __ vdiv(quotient, dividend, divisor);
+ __ vcvt_s32_f64(quotient.low(), quotient);
+ __ vcvt_f64_s32(quotient, quotient.low());
+
+ // Compute the remainder in result.
+ DwVfpRegister double_scratch = dividend;
+ __ vmul(double_scratch, divisor, quotient);
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(scratch, double_scratch.low());
+
+ if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ sub(result, left, scratch);
+ } else {
+ Label ok;
+ // Check for -0.
+ __ sub(scratch2, left, scratch, SetCC);
+ __ b(ne, &ok);
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(mi, instr->environment());
+ __ bind(&ok);
+ // Load the result and we are done.
+ __ mov(result, scratch2);
+ }
+ }
__ bind(&done);
}
@@ -1135,15 +1220,18 @@ void LCodeGen::DoDivI(LDivI* instr) {
DeferredDivI(LCodeGen* codegen, LDivI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
+ codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
+ instr_->left(),
+ instr_->right(),
+ Token::DIV);
}
virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
- const Register left = ToRegister(instr->InputAt(0));
- const Register right = ToRegister(instr->InputAt(1));
+ const Register left = ToRegister(instr->left());
+ const Register right = ToRegister(instr->right());
const Register scratch = scratch0();
const Register result = ToRegister(instr->result());
@@ -1191,7 +1279,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new DeferredDivI(this, instr);
+ DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
__ TrySmiTag(right, &deoptimize, scratch);
@@ -1212,15 +1300,15 @@ void LCodeGen::DoDivI(LDivI* instr) {
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->InputAt(0));
- const Register remainder = ToRegister(instr->TempAt(0));
+ const Register left = ToRegister(instr->left());
+ const Register remainder = ToRegister(instr->temp());
const Register scratch = scratch0();
// We only optimize this for division by constants, because the standard
// integer division routine is usually slower than transitioning to VFP.
// This could be optimized on processors with SDIV available.
- ASSERT(instr->InputAt(1)->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ ASSERT(instr->right()->IsConstantOperand());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
if (divisor < 0) {
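// A zero dividend with a negative divisor would produce -0, which
// cannot be represented as an int32.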
__ cmp(left, Operand(0));
DeoptimizeIf(eq, instr->environment());
@@ -1238,11 +1326,12 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
}
-template<int T>
-void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
+ LOperand* left_argument,
+ LOperand* right_argument,
Token::Value op) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(left_argument);
+ Register right = ToRegister(right_argument);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
// Move left to r1 and right to r0 for the stub call.
@@ -1261,7 +1350,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
}
BinaryOpStub stub(op, OVERWRITE_LEFT);
__ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
+ RecordSafepointWithRegistersAndDoubles(pointer_map,
0,
Safepoint::kNoLazyDeopt);
// Overwrite the stored value of r0 with the result of the stub.
@@ -1273,8 +1362,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
@@ -1341,7 +1430,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
Register right = EmitLoadRegister(right_op, scratch);
if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
+ __ orr(ToRegister(instr->temp()), left, right);
}
if (can_overflow) {
@@ -1358,7 +1447,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
+ __ cmp(ToRegister(instr->temp()), Operand(0));
DeoptimizeIf(mi, instr->environment());
__ bind(&done);
}
@@ -1367,8 +1456,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->InputAt(0);
- LOperand* right_op = instr->InputAt(1);
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
ASSERT(left_op->IsRegister());
Register left = ToRegister(left_op);
Register result = ToRegister(instr->result());
@@ -1401,14 +1490,17 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
// Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
// result may alias either of them.
- LOperand* right_op = instr->InputAt(1);
- Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
if (right_op->IsRegister()) {
// Mask the right_op operand.
__ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
+ case Token::ROR:
+ __ mov(result, Operand(left, ROR, scratch));
+ break;
case Token::SAR:
__ mov(result, Operand(left, ASR, scratch));
break;
@@ -1432,6 +1524,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, ROR, shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ mov(result, Operand(left, ASR, shift_count));
@@ -1466,8 +1565,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
@@ -1496,7 +1595,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
- __ Vmov(result, v);
+ __ Vmov(result, v, scratch0());
}
@@ -1513,21 +1612,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -1540,9 +1646,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
Label done;
// If the object is a smi return the object.
@@ -1561,9 +1667,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(result));
@@ -1571,11 +1677,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ tst(object, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- __ Assert(eq, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(ne, instr->environment());
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1601,14 +1706,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ mvn(result, Operand(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
__ push(input_reg);
CallRuntime(Runtime::kThrow, 1, instr);
@@ -1619,8 +1724,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
@@ -1639,9 +1744,71 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register left_reg = ToRegister(left);
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+ ? ToOperand(right)
+ : Operand(EmitLoadRegister(right, ip));
+ Register result_reg = ToRegister(instr->result());
+ __ cmp(left_reg, right_op);
+ if (!result_reg.is(left_reg)) {
+ __ mov(result_reg, left_reg, LeaveCC, condition);
+ }
+ __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister left_reg = ToDoubleRegister(left);
+ DoubleRegister right_reg = ToDoubleRegister(right);
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(vs, &check_nan_left);
+ __ b(eq, &check_zero);
+ __ b(condition, &return_left);
+ __ b(al, &return_right);
+
+ __ bind(&check_zero);
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ __ b(ne, &return_left); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ __ vneg(left_reg, left_reg);
+ __ vsub(result_reg, left_reg, right_reg);
+ __ vneg(result_reg, result_reg);
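+ // This computes -((-left) - right), which is -0 whenever either input
+ // is -0; a plain vadd would give +0 for min(-0, +0).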
+ } else {
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+ // the decision for vadd is easy because vand is a NEON instruction.
+ __ vadd(result_reg, left_reg, right_reg);
+ }
+ __ b(al, &done);
+
+ __ bind(&check_nan_left);
+ __ VFPCompareAndSetFlags(left_reg, left_reg);
+ __ b(vs, &return_left); // left == NaN.
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ vmov(result_reg, right_reg);
+ }
+ __ b(al, &done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ vmov(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
@@ -1680,11 +1847,14 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
- ASSERT(ToRegister(instr->InputAt(1)).is(r0));
+ ASSERT(ToRegister(instr->left()).is(r1));
+ ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ // Block literal pool emission to ensure the nop indicating no inlined
+ // smi code is in the correct position.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1723,11 +1893,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
@@ -1736,7 +1906,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(true_block, false_block, eq);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
@@ -1875,8 +2045,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cond = TokenToCondition(instr->op(), false);
@@ -1916,8 +2086,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1927,7 +2097,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1938,7 +2108,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -2006,8 +2176,8 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2032,8 +2202,8 @@ Condition LCodeGen::EmitIsString(Register input,
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2050,15 +2220,15 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
__ tst(input_reg, Operand(kSmiTagMask));
EmitBranch(true_block, false_block, eq);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2128,7 +2298,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2143,12 +2313,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AssertString(input);
__ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -2157,7 +2325,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2238,9 +2406,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register temp = scratch0();
- Register temp2 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2256,8 +2424,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -2268,8 +2436,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
+ ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
+ ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2297,11 +2465,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
ASSERT(object.is(r0));
@@ -2316,20 +2484,26 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
Register map = temp;
__ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ {
+ // Block constant pool emission to ensure the positions of instructions are
+ // as expected by the patcher. See InstanceofStub::Generate().
+ Assembler::BlockConstPoolScope block_const_pool(masm());
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ PredictableCodeSizeScope predictable(masm_);
+ Handle<JSGlobalPropertyCell> cell =
+ factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(map, Operand(ip));
+ __ b(ne, &cache_miss);
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ mov(result, Operand(factory()->the_hole_value()));
+ }
__ b(&done);
// The inlined call site cache did not match. Check null and string before
@@ -2376,15 +2550,24 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// Get the temp register reserved by the instruction. This needs to be r4,
// as its slot in the pushed safepoint registers is used to communicate the
// offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 4;
+ static const int kAdditionalDelta = 5;
+ // Make sure that the code size is predictable, since we use specific
+ // constant offsets in the code to find embedded values.
+ PredictableCodeSizeScope predictable(masm_);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
+ // The mov above can generate one or two instructions. The delta was computed
+ // for two instructions, so we need to pad here in case of one instruction.
+ if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
+ ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
+ __ nop();
+ }
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
@@ -2467,7 +2650,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->TempAt(0));
+ Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment());
@@ -2546,7 +2729,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2560,12 +2743,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2577,9 +2760,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ cmp(result, Operand(Handle<Map>(current->map())));
+ DeoptimizeIf(ne, env);
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
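+ // Every prototype map matched, so the property is guaranteed to be
+ // absent and the load yields undefined.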
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
@@ -2587,7 +2784,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
+ Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
@@ -2598,18 +2795,24 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
}
Handle<String> name = instr->hydrogen()->name();
Label done;
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(scratch, Operand(map));
+ Label check_passed;
+ __ CompareMap(
+ object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
__ b(ne, &next);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
__ b(&done);
__ bind(&next);
}
@@ -2617,7 +2820,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ bind(&done);
}
@@ -2630,7 +2833,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2680,7 +2883,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
Register scratch = scratch0();
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
@@ -2697,8 +2900,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(FAST_ELEMENTS));
- __ b(eq, &done);
+ __ cmp(scratch, Operand(GetInitialFastElementsKind()));
+ __ b(lt, &fail);
+ __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
+ __ b(le, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2713,7 +2918,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->InputAt(0));
+ Register from_reg = ToRegister(instr->object());
__ ldr(to_reg, FieldMemOperand(from_reg,
ExternalArray::kExternalPointerOffset));
}
@@ -2724,82 +2929,16 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
-
- // Bailout index is not a valid argument index. Use unsigned check to get
- // negative check for free.
- __ sub(length, length, index, SetCC);
- DeoptimizeIf(ls, instr->environment());
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them; add one more for the other.
+ __ sub(length, length, index);
__ add(length, length, Operand(1));
__ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Load the result.
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- uint32_t offset = FixedArray::kHeaderSize +
- (instr->additional_index() << kPointerSizeLog2);
- __ ldr(result, FieldMemOperand(scratch, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
- if (!key_is_constant) {
- __ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
- }
-
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
-
- __ vldr(result, elements, 0);
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -2812,15 +2951,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
- ? Operand(constant_key << shift_size)
+ ? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
@@ -2831,15 +2972,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
} else {
Register result = ToRegister(instr->result());
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset)
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2859,17 +2995,19 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
- __ cmp(result, Operand(0x80000000));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(cs, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ cmp(result, Operand(0x80000000));
+ DeoptimizeIf(cs, instr->environment());
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2879,12 +3017,142 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
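
The new kUint32 guard exists because a loaded uint32 with its top bit set has no int32 representation; unless the hydrogen instruction is marked kUint32, the code keeps the old behavior of deoptimizing at that boundary (cs is the unsigned >= condition). The boundary itself, demonstrated:

#include <cassert>
#include <cstdint>

int main() {
  // 0x80000000 is the first uint32 that does not fit an int32 -- exactly the
  // cmp(result, 0x80000000) / DeoptimizeIf(cs) threshold above.
  uint32_t in_range = 0x7FFFFFFFu;
  uint32_t out_of_range = 0x80000000u;
  assert(in_range <= static_cast<uint32_t>(INT32_MAX));
  assert(out_of_range > static_cast<uint32_t>(INT32_MAX));
  return 0;
}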
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ Operand operand = key_is_constant
+ ? Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(elements, elements, operand);
+ if (!key_is_constant) {
+ __ add(elements, elements,
+ Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ (instr->additional_index() << element_size_shift)));
+ }
+
+ __ vldr(result, elements, 0);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+}
+
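
The hole check after the vldr reads only the upper 32 bits of the double -- that is what the ldr at offset sizeof(kHoleNanLower32) does on a little-endian target -- because the hole is a dedicated NaN bit pattern recognizable from its upper word alone. A sketch of that word extraction; the actual hole bit pattern is V8-internal and deliberately not reproduced here:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Read the upper 32 bits of a double the way the generated ldr does: the
// second 4-byte word of the 8-byte representation on a little-endian machine.
uint32_t UpperWord(double d) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof d);
  return words[1];
}

int main() {
  printf("%08x\n", UpperWord(1.0));  // 3ff00000 on little-endian IEEE-754
  return 0;
}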
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ ldr(result, FieldMemOperand(store_base, offset));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ tst(result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ add(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ return MemOperand(base, key, LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, key, LSR, 1);
+ }
+ }
+
+ if (shift_size >= 0) {
+ return MemOperand(base, scratch0(), LSL, shift_size);
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ return MemOperand(base, scratch0(), LSR, 1);
+ }
+}
+
+
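
PrepareKeyedOperand folds smi untagging into the address arithmetic: a tagged key shrinks the shift by kSmiTagSize, and for single-byte elements that makes the shift -1, which is the LSR #1 case above. The same arithmetic on plain integers, assuming V8's 32-bit one-bit smi tag:

#include <cassert>
#include <cstdint>

// reg_value is the raw key register contents: either the untagged index or
// the smi-tagged index (index << 1). shift_size mirrors
// element_size_shift - kSmiTagSize for tagged keys.
uint32_t KeyedByteOffset(uint32_t reg_value, bool key_is_tagged,
                         int element_size_shift) {
  int shift_size = key_is_tagged ? element_size_shift - 1 : element_size_shift;
  // shift_size == -1 corresponds to MemOperand(base, key, LSR, 1).
  return shift_size >= 0 ? reg_value << shift_size : reg_value >> 1;
}

int main() {
  // Index 5 into a byte array: tagged key 10 and untagged key 5 agree.
  assert(KeyedByteOffset(10, true, 0) == KeyedByteOffset(5, false, 0));
  // Index 2 into an 8-byte (double) array: byte offset 16 either way.
  assert(KeyedByteOffset(4, true, 3) == 16 && KeyedByteOffset(2, false, 3) == 16);
  return 0;
}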
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2910,7 +3178,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->InputAt(0));
+ Register elem = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Label done;
@@ -3029,7 +3297,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
Abort("DoPushArgument not implemented for double type.");
} else {
@@ -3046,7 +3314,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -3076,12 +3344,14 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
+ __ ldr(result, ContextOperand(cp, instr->qml_global()
+ ? Context::QML_GLOBAL_OBJECT_INDEX
+ : Context::GLOBAL_OBJECT_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
+ Register global = ToRegister(instr->global_object());
Register result = ToRegister(instr->result());
__ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -3103,14 +3373,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(r1, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@@ -3147,7 +3411,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3213,7 +3477,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ cmp(input, Operand(0));
__ Move(result, input, pl);
@@ -3243,7 +3507,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
+ DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vabs(result, input);
} else if (r.IsInteger32()) {
@@ -3251,8 +3515,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->InputAt(0));
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
@@ -3263,29 +3527,24 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- SwVfpRegister single_scratch = double_scratch0().low();
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register scratch = scratch0();
__ EmitVFPTruncate(kRoundToMinusInf,
- single_scratch,
+ result,
input,
- scratch1,
- scratch2);
+ scratch,
+ double_scratch0());
DeoptimizeIf(ne, instr->environment());
- // Move the result back to general purpose register r0.
- __ vmov(result, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ vmov(scratch1, input.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ __ vmov(scratch, input.high());
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
@@ -3293,8 +3552,9 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3319,12 +3579,12 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
DeoptimizeIf(ge, instr->environment());
+ __ Vmov(double_scratch0(), 0.5, scratch);
+ __ vadd(double_scratch0(), input, double_scratch0());
+
// Save the original sign for later comparison.
__ and_(scratch, result, Operand(HeapNumber::kSignMask));
- __ Vmov(double_scratch0(), 0.5);
- __ vadd(double_scratch0(), input, double_scratch0());
-
// Check sign of the result: if the sign changed, the input
// value was in ]-0.5, 0[ and the result should be -0.
__ vmov(result, double_scratch0().high());
@@ -3337,12 +3597,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
__ EmitVFPTruncate(kRoundToMinusInf,
- double_scratch0().low(),
- double_scratch0(),
result,
- scratch);
+ double_scratch0(),
+ scratch,
+ double_scratch1);
DeoptimizeIf(ne, instr->environment());
- __ vmov(result, double_scratch0().low());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
@@ -3358,22 +3617,22 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done;
- __ vmov(temp, -V8_INFINITY);
+ __ vmov(temp, -V8_INFINITY, scratch0());
__ VFPCompareAndSetFlags(input, temp);
__ vneg(result, temp, eq);
__ b(&done, eq);
@@ -3389,11 +3648,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(d2));
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(r2));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d2));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(r2));
+ ASSERT(ToDoubleRegister(instr->left()).is(d1));
ASSERT(ToDoubleRegister(instr->result()).is(d3));
if (exponent_type.IsTagged()) {
@@ -3428,21 +3687,21 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+ ASSERT(ToRegister(instr->global_object()).is(r0));
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the global context's random seeds
+ // r2: FixedArray of the native context's random seeds
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
@@ -3590,7 +3849,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3603,7 +3862,7 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3628,7 +3887,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3644,7 +3903,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
+ ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3669,6 +3928,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->transition().is_null()) {
__ mov(scratch, Operand(instr->transition()));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ scratch,
+ temp,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
}
// Do the store.
@@ -3716,104 +3987,50 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
- DeoptimizeIf(hs, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
- + FixedArray::kHeaderSize;
- __ str(value, FieldMemOperand(elements, offset));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- if (instr->additional_index() != 0) {
- __ add(scratch,
- scratch,
- Operand(instr->additional_index() << kPointerSizeLog2));
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ tst(ToRegister(operand), Operand(kSmiTagMask));
+ } else {
+ __ mov(ip, ToOperand(operand));
+ __ tst(ip, Operand(kSmiTagMask));
}
- __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ DeoptimizeIf(ne, environment);
}
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ mov(ip, Operand(Smi::FromInt(constant_index)));
+ } else {
+ __ mov(ip, Operand(constant_index));
}
+ __ cmp(ip, ToRegister(instr->length()));
} else {
- key = ToRegister(instr->key());
+ __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- Operand operand = key_is_constant
- ? Operand((constant_key << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- // Only load canonical NaN if the comparison above set the overflow.
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- vs);
- }
-
- __ vstr(value, scratch, instr->additional_index() << shift_size);
+ DeoptimizeIf(hs, instr->environment());
}
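
Two facts make the rewritten bounds check sound. First, a one-bit smi tag preserves ordering, so a tagged constant index can be compared directly against a tagged length. Second, the hs condition is an unsigned >= test, so a negative index -- whose sign bit makes it enormous when read unsigned -- fails the check for free, which is why no separate negativity test is emitted. Both facts on plain integers:

#include <cassert>
#include <cstdint>

int main() {
  // Smi tagging (value << 1) is monotonic: tagged values order the same way
  // the untagged ones do, so Smi::FromInt(i) < tagged_length iff i < length.
  assert((5u << 1) < (7u << 1));

  // DeoptimizeIf(hs) is unsigned index >= length; a negative index read as
  // unsigned is huge and therefore also trips the deopt.
  int32_t index = -1;
  uint32_t length = 100;
  assert(static_cast<uint32_t>(index) >= length);
  return 0;
}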
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3826,15 +4043,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
- : Operand(key, LSL, shift_size));
+ Operand operand(key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
@@ -3844,16 +4064,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
} else {
Register value(ToRegister(instr->value()));
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ add(scratch0(), key, Operand(instr->additional_index()));
- }
- MemOperand mem_operand(key_is_constant
- ? MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size))
- : (instr->additional_index() == 0
- ? MemOperand(external_pointer, key, LSL, shift_size)
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3872,7 +4086,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3882,6 +4099,110 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ Operand operand = key_is_constant
+ ? Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch, elements, operand);
+ if (!key_is_constant) {
+ __ add(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Check for NaN. All NaNs must be canonicalized.
+ __ VFPCompareAndSetFlags(value, value);
+ // Only load canonical NaN if the comparison above set the overflow.
+ __ Vmov(value,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+ no_reg, vs);
+ }
+
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HStoreKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ str(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ add(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
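
The store path emits RecordWrite only when the hydrogen value might be a heap object, and skips even the dynamic smi test (OMIT_SMI_CHECK) when the type is statically known. A toy version of that decision, using the one-bit tag this file's STATIC_ASSERT(kHeapObjectTag == 1) relies on:

#include <cstdint>

// A store needs the generational write barrier only for heap pointers, which
// carry a low tag bit of 1; smis (low bit 0) never point into the heap.
bool StoreNeedsBarrier(uintptr_t value, bool statically_heap_object) {
  if (statically_heap_object) return true;  // OMIT_SMI_CHECK: no runtime test
  return (value & 1) != 0;                  // INLINE_SMI_CHECK: test the tag
}

int main() {
  uintptr_t heap_ptr = 0x1001;  // tagged heap pointer
  uintptr_t smi = 42u << 1;     // smi-encoded integer
  return (StoreNeedsBarrier(heap_ptr, false) && !StoreNeedsBarrier(smi, false))
             ? 0 : 1;
}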
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch by case: external array, fixed double array, or fixed array.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
@@ -3890,13 +4211,13 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
@@ -3909,21 +4230,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
- if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
@@ -3956,7 +4279,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -3991,9 +4314,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(r0);
- }
+ __ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -4011,7 +4332,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4048,14 +4369,14 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->InputAt(0));
+ Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
__ ldr(result, FieldMemOperand(string, String::kLengthOffset));
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -4071,30 +4392,73 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ __ vmov(flt_scratch, ToRegister(input));
+ __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->value(),
+ SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->value());
Register dst = ToRegister(instr->result());
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->value(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmp(reg, Operand(Smi::kMaxValue));
+ __ b(hi, deferred->entry());
+ __ SmiTag(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
@@ -4102,19 +4466,25 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ eor(src, src, Operand(0x80000000));
+ }
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ } else {
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4129,12 +4499,13 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
+ __ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Store the value in dbl_scratch into the allocated heap number.
__ bind(&done);
- __ sub(ip, dst, Operand(kHeapObjectTag));
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ __ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
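
The deferred path can recover the original integer from the overflowed smi because SmiTag only loses the top bit: untag the wrapped result, then flip bit 31. Worked through under V8's 32-bit one-bit smi encoding (where Smi::kMaxValue is 2^30 - 1, the same bound DoNumberTagU compares against for the unsigned case):

#include <cassert>
#include <cstdint>

// 32-bit smi encoding: value << 1. Tagging 0x40000000 wraps to 0x80000000 and
// sets the overflow flag, which is what routes execution into this deferred
// code in the first place.
int32_t SmiTag(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
}
int32_t SmiUntag(int32_t s) { return s >> 1; }  // arithmetic shift right

int main() {
  int32_t src = 0x40000000;   // one past Smi::kMaxValue: does not fit a smi
  int32_t dst = SmiTag(src);  // wrapped result; hardware sets the V flag
  // The SmiUntag + eor(0x80000000) sequence from the deferred code:
  int32_t recovered = SmiUntag(dst) ^ static_cast<int32_t>(0x80000000);
  assert(recovered == src);
  return 0;
}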
@@ -4150,22 +4521,25 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ // Now that we have finished with the object's real address, tag it.
+ __ add(reg, reg, Operand(kHeapObjectTag));
}
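
DONT_TAG_RESULT keeps the fresh allocation untagged so the vstr can address it directly; the tag is added once at the end rather than subtracted again by every FieldMemOperand access. The pointer arithmetic, sketched:

#include <cassert>
#include <cstdint>

// kHeapObjectTag == 1: a tagged pointer is the real address plus one, and a
// tagged field access subtracts that one again. Storing through the untagged
// address and tagging once afterwards saves the per-store subtraction.
constexpr uintptr_t kTag = 1;

uintptr_t FieldAddress(uintptr_t tagged_ptr, int offset) {
  return tagged_ptr + offset - kTag;  // what FieldMemOperand encodes
}

int main() {
  uintptr_t untagged = 0x1000;         // AllocateHeapNumber result, untagged
  uintptr_t tagged = untagged + kTag;  // the final add(reg, reg, kHeapObjectTag)
  assert(FieldAddress(tagged, 8) == untagged + 8);
  return 0;
}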
@@ -4178,18 +4552,19 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
@@ -4261,11 +4636,11 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_scratch = double_scratch0();
- SwVfpRegister single_scratch = double_scratch.low();
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4284,8 +4659,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ Register scratch3 = ToRegister(instr->temp2());
+ SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4320,14 +4695,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(double_scratch, ip, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- single_scratch,
+ input_reg,
double_scratch,
scratch1,
- scratch2,
+ double_scratch2,
kCheckForInexactConversion);
DeoptimizeIf(ne, instr->environment());
- // Load the result.
- __ vmov(input_reg, single_scratch);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand(0));
@@ -4352,13 +4725,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Optimistically untag the input.
// If the input is a HeapObject, SmiUntag will set the carry flag.
@@ -4371,7 +4744,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -4389,14 +4762,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
- SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch2 = ToRegister(instr->temp());
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
Label done;
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
+ Register scratch3 = ToRegister(instr->temp2());
+ SwVfpRegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4404,39 +4777,38 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
scratch2,
scratch3);
} else {
- VFPRoundingMode rounding_mode = kRoundToMinusInf;
- __ EmitVFPTruncate(rounding_mode,
- single_scratch,
+ DwVfpRegister double_scratch = double_scratch0();
+ __ EmitVFPTruncate(kRoundToMinusInf,
+ result_reg,
double_input,
scratch1,
- scratch2,
+ double_scratch,
kCheckForInexactConversion);
+
// Deoptimize if we had a vfp invalid exception,
// including inexact operation.
DeoptimizeIf(ne, instr->environment());
- // Retrieve the result.
- __ vmov(result_reg, single_scratch);
}
__ bind(&done);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ tst(ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ tst(ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -4509,7 +4881,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -4529,7 +4901,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
@@ -4545,7 +4917,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4579,8 +4951,9 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4603,7 +4976,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
DoCheckMapCommon(temp1, temp2,
Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- DeoptimizeIf(ne, instr->environment());
}
@@ -4618,11 +4990,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
- Register scratch2 = ToRegister(instr->TempAt(1));
+ Register scratch = ToRegister(instr->temp());
+ Register scratch2 = ToRegister(instr->temp2());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4690,14 +5063,15 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4709,12 +5083,12 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(ne, instr->environment());
}
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(r3, literals);
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4808,8 +5182,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little-endian mode...
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(r2, Operand(value_low));
@@ -4848,10 +5222,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4912,7 +5287,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+ ASSERT(ToRegister(instr->value()).is(r0));
__ push(r0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -4921,15 +5296,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // r3 = JS function.
// r7 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
// r2 and r4-r6 are used as temporaries.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(r7, instr->hydrogen()->literals());
__ ldr(r1, FieldMemOperand(r7, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
@@ -4995,14 +5368,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -5092,7 +5465,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -5126,6 +5499,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ // Block literal pool emission for the duration of the padding.
+ Assembler::BlockConstPoolScope block_const_pool(masm());
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
@@ -5211,6 +5586,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
+ PredictableCodeSizeScope predictable(masm_);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
@@ -5220,7 +5596,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
@@ -5291,13 +5667,23 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Operand(Smi::FromInt(0)));
+ __ b(ne, &load_cache);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
DeoptimizeIf(eq, instr->environment());
+
+ __ bind(&done);
}
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
index c6a3af7..921285b 100644
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
+++ b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
@@ -44,21 +44,24 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deopt_jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- deferred_(8),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -71,6 +74,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -106,11 +110,17 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
- template<int T>
- void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
+ LOperand* left_argument,
+ LOperand* right_argument,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -128,8 +138,20 @@ class LCodeGen BASE_EMBEDDED {
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void WriteTranslation(LEnvironment* environment,
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -153,7 +175,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -173,10 +195,10 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -191,14 +213,18 @@ class LCodeGen BASE_EMBEDDED {
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
+ void CallCode(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
+ void CallCodeGeneric(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
@@ -239,7 +265,10 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -284,6 +313,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -314,7 +347,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -343,8 +377,15 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
-
- LChunk* const chunk_;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
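
Every growable list in the constructor now receives the CompilationInfo's Zone, so growth allocates from the per-compilation arena and is freed wholesale when compilation ends. A simplified sketch of the pattern, assuming a bump-style arena (this ZoneList is a stand-in, not V8's class):

    #include <cstddef>
    #include <vector>

    // Toy arena: hands out blocks, releases everything at destruction.
    class Zone {
     public:
      void* New(size_t size) {
        arena_.push_back(new char[size]);
        return arena_.back();
      }
      ~Zone() { for (char* p : arena_) delete[] p; }
     private:
      std::vector<char*> arena_;
    };

    // Every growth point takes the zone explicitly, which is why the Add()
    // calls throughout this diff gained a zone argument.
    template <typename T>
    class ZoneList {
     public:
      ZoneList(int capacity, Zone* zone) { Grow(capacity, zone); }
      void Add(const T& value, Zone* zone) {
        if (length_ == capacity_) Grow(capacity_ * 2, zone);
        data_[length_++] = value;
      }
     private:
      void Grow(int new_capacity, Zone* zone) {
        T* new_data = static_cast<T*>(zone->New(new_capacity * sizeof(T)));
        for (int i = 0; i < length_; i++) new_data[i] = data_[i];
        data_ = new_data;
        capacity_ = new_capacity;
      }
      T* data_ = nullptr;
      int length_ = 0;
      int capacity_ = 0;
    };

    int main() {
      Zone zone;
      ZoneList<int> deferred(8, &zone);   // deferred_(8, info->zone())
      deferred.Add(42, &zone);            // deferred_.Add(code, zone())
    }
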
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
index cefca47..c100720 100644
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -36,7 +36,7 @@ namespace internal {
static const Register kSavedValueRegister = { 9 };
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL) { }
@@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
index 4da2fec..dcc7149 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
@@ -108,7 +108,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
int MacroAssembler::CallSize(Register target, Condition cond) {
-#if USE_BLX
+#ifdef USE_BLX
return kInstrSize;
#else
return 2 * kInstrSize;
@@ -121,7 +121,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
+#ifdef USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
@@ -137,7 +137,19 @@ int MacroAssembler::CallSize(
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(
+ Address target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
size += kInstrSize;
}
return size;
@@ -146,15 +158,29 @@ int MacroAssembler::CallSize(
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
- Condition cond) {
+ Condition cond,
+ TargetAddressStorageMode mode) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
- // On ARMv5 and after the recommended call sequence is:
- // ldr ip, [pc, #...]
- // blx ip
+
+ bool old_predictable_code_size = predictable_code_size();
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(true);
+ }
+
+#ifdef USE_BLX
+ // Call sequence on ARMv7 or later may be:
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or, for pre-ARMv7 cores or for values that may be back-patched
+ // to avoid ICache flushes:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
@@ -165,21 +191,22 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
- ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(old_predictable_code_size);
+ }
}
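
NEVER_INLINE_TARGET_ADDRESS works by forcing predictable_code_size() on around the call sequence, so the target goes to the constant pool (back-patchable without resizing code) instead of a movw/movt pair. The manual save/restore above is the open-coded form of a scope guard; a sketch of the RAII shape, modelling only the flag handling visible in this diff (the real PredictableCodeSizeScope may do more):

    // Minimal stand-in for the assembler flag.
    class Assembler {
     public:
      bool predictable_code_size() const { return predictable_code_size_; }
      void set_predictable_code_size(bool v) { predictable_code_size_ = v; }
     private:
      bool predictable_code_size_ = false;
    };

    class PredictableCodeSizeScope {
     public:
      explicit PredictableCodeSizeScope(Assembler* masm)
          : masm_(masm), old_value_(masm->predictable_code_size()) {
        masm_->set_predictable_code_size(true);
      }
      ~PredictableCodeSizeScope() {
        masm_->set_predictable_code_size(old_value_);
      }
     private:
      Assembler* masm_;
      bool old_value_;
    };

    int main() {
      Assembler masm;
      {
        PredictableCodeSizeScope predictable(&masm);
        // While set, ARMv7-only shortcuts (movw/movt, ldrd/strd, ubfx paths)
        // are suppressed so the emitted sequence has a fixed, known size.
      }
      return masm.predictable_code_size() ? 1 : 0;  // flag restored: returns 0
    }
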
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond) {
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@@ -187,19 +214,18 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond) {
+ TypeFeedbackId ast_id,
+ Condition cond,
+ TargetAddressStorageMode mode) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<Address>(code.location()), rmode, cond);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
- SizeOfCodeGeneratedSince(&start));
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
@@ -265,8 +291,8 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
@@ -276,17 +302,15 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
+ !src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
+ } else if (!src2.is_single_instruction(this) &&
+ !src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
-
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
@@ -296,7 +320,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@@ -311,7 +335,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -339,7 +363,7 @@ void MacroAssembler::Bfi(Register dst,
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -351,12 +375,14 @@ void MacroAssembler::Bfi(Register dst,
}
-void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
+ Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
+ bic(dst, src, Operand(mask));
} else {
+ Move(dst, src, cond);
bfc(dst, lsb, width, cond);
}
}
@@ -364,7 +390,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@@ -396,6 +422,16 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
+ if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ !Heap::RootCanBeWrittenAfterInitialization(index)) {
+ Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
+ if (!isolate()->heap()->InNewSpace(*root)) {
+ // The CPU supports fast immediate values, and this root will never
+ // change. We will load it as a relocatable immediate value.
+ mov(destination, Operand(root), LeaveCC, cond);
+ return;
+ }
+ }
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
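
The fast path fires only when the root can be baked into the instruction stream safely: the CPU must load 32-bit immediates cheaply (movw/movt), the root must never be rewritten after heap initialization, and it must not live in new space where the scavenger could move it. The predicate, spelled out (field names here are illustrative):

    struct RootInfo {
      bool cpu_has_movw_movt;   // MOVW_MOVT_IMMEDIATE_LOADS is supported
      bool written_after_init;  // Heap::RootCanBeWrittenAfterInitialization
      bool in_new_space;        // object could move under a scavenge
    };

    // Anything failing these checks keeps the kRootRegister table load.
    bool LoadRootAsImmediate(const RootInfo& r) {
      return r.cpu_has_movw_movt && !r.written_after_init && !r.in_new_space;
    }

    int main() {
      RootInfo immortal{true, false, false};
      return LoadRootAsImmediate(immortal) ? 0 : 1;  // 0: immediate load
    }
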
@@ -672,7 +708,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@@ -714,7 +750,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@@ -777,8 +813,9 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
+ const Register scratch,
const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -788,7 +825,7 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
} else if (value.bits == minus_zero.bits) {
vneg(dst, kDoubleRegZero, cond);
} else {
- vmov(dst, imm, cond);
+ vmov(dst, imm, scratch, cond);
}
}
@@ -930,6 +967,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1338,31 +1376,32 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "we should not have an empty lexical context");
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
cmp(scratch, Operand(ip));
b(eq, &same_contexts);
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
@@ -1374,13 +1413,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "JSGlobalProxy::context() should not be null.");
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -1553,7 +1592,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
mov(topaddr, Operand(new_space_allocation_top));
- mov(obj_size_reg, Operand(object_size));
+ Operand obj_size_operand = Operand(object_size);
+ if (!obj_size_operand.is_single_instruction(this)) {
+ // We are about to steal IP, so we need to load this value first
+ mov(obj_size_reg, obj_size_operand);
+ }
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1575,7 +1618,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg), SetCC);
+ if (obj_size_operand.is_single_instruction(this)) {
+ // We can add the size as an immediate.
+ add(scratch2, result, obj_size_operand, SetCC);
+ } else {
+ // The size doesn't fit in an immediate, so we have to use the register.
+ add(scratch2, result, obj_size_reg, SetCC);
+ }
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
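
Both branches hinge on Operand::is_single_instruction(), which for plain constants boils down to the ARM immediate rule: an 8-bit payload rotated right by an even amount. A sketch of that encoding test (an approximation; the real predicate also considers mvn duality and movw availability):

    #include <cstdint>
    #include <cstdio>

    bool FitsArmImmediate(uint32_t value) {
      if (value <= 0xff) return true;
      for (int rot = 2; rot < 32; rot += 2) {
        // Rotating left by 'rot' undoes a rotate-right-by-'rot' encoding.
        uint32_t rotated = (value << rot) | (value >> (32 - rot));
        if (rotated <= 0xff) return true;
      }
      return false;
    }

    int main() {
      std::printf("%d\n", FitsArmImmediate(0x3fc));  // 1: 0xff ror 30
      std::printf("%d\n", FitsArmImmediate(0x101));  // 0: needs two instructions
    }
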
@@ -1868,10 +1917,12 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
@@ -1879,22 +1930,25 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
@@ -1962,13 +2016,13 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
- Register untagged_value = receiver_reg;
+ Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
@@ -1979,7 +2033,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
@@ -1995,24 +2049,27 @@ void MacroAssembler::CompareMap(Register obj,
Label* early_success,
CompareMapMode mode) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- cmp(scratch, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
- }
+ CompareMap(scratch, map, early_success, mode);
+}
+
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ cmp(obj_map, Operand(map));
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ ElementsKind kind = map->elements_kind();
+ if (IsFastElementsKind(kind)) {
+ bool packed = IsFastPackedElementsKind(kind);
+ Map* current_map = *map;
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ current_map = current_map->LookupElementsTransitionMap(kind);
+ if (!current_map) break;
+ b(eq, early_success);
+ cmp(obj_map, Operand(Handle<Map>(current_map)));
+ }
}
}
}
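
The rewritten ALLOW_ELEMENT_TRANSITION_MAPS path replaces two hand-picked comparisons with a walk over every strictly more general fast elements kind, comparing the object's map against each transitioned map until the transition tree runs out. The shape of the walk, where more_general models LookupElementsTransitionMap applied along GetNextMoreGeneralFastElementsKind:

    // Each map may link to a transitioned map with a strictly more general
    // fast elements kind; the chain ends where no transition was created.
    struct Map { const Map* more_general; };

    bool MatchesMapOrTransition(const Map* obj_map, const Map* map) {
      if (obj_map == map) return true;                  // the cmp before the loop
      for (const Map* m = map->more_general; m != nullptr;
           m = m->more_general) {
        if (obj_map == m) return true;                  // one cmp per step
      }
      return false;
    }

    int main() {
      Map holey{nullptr}, packed{&holey};
      return MatchesMapOrTransition(&holey, &packed) ? 0 : 1;  // 0: matched
    }
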
@@ -2127,7 +2184,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
}
@@ -2323,8 +2380,8 @@ void MacroAssembler::ConvertToInt32(Register source,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch);
@@ -2414,16 +2471,27 @@ void MacroAssembler::ConvertToInt32(Register source,
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
+ Register result,
DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
+ Register scratch,
+ DwVfpRegister double_scratch,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
+ Register prev_fpscr = result;
+ Label done;
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, &done);
+
+ // Convert to integer, respecting rounding mode.
int32_t check_inexact_conversion =
(check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
@@ -2445,7 +2513,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
vmsr(scratch);
// Convert the argument to an integer.
- vcvt_s32_f64(result,
+ vcvt_s32_f64(double_scratch.low(),
double_input,
(rounding_mode == kRoundToZero) ? kDefaultRoundToZero
: kFPSCRRounding);
@@ -2454,8 +2522,12 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
vmrs(scratch);
// Restore FPSCR.
vmsr(prev_fpscr);
+ // Move the converted value into the result register.
+ vmov(result, double_scratch.low());
// Check for vfp exceptions.
tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+
+ bind(&done);
}
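
The hunk prepends a fast path: convert to int32, convert back, and compare; if the round trip is exact, the FPSCR save/modify/restore sequence is skipped entirely. The same logic in portable C++:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the fast path added above: an exact round trip means the value
    // is already a representable signed 32-bit integer, so no rounding-mode
    // work (and no inexact-exception check) is needed. Assumes 'input' is in
    // int32 range; vcvt saturates, whereas this cast would be undefined.
    bool TruncateFastPath(double input, int32_t* result) {
      int32_t candidate = static_cast<int32_t>(input);  // vcvt_s32_f64
      if (static_cast<double>(candidate) == input) {    // vcvt_f64_s32 + compare
        *result = candidate;
        return true;   // b(eq, &done)
      }
      return false;    // fall through to the FPSCR rounding sequence
    }

    int main() {
      int32_t r;
      std::printf("%d\n", TruncateFastPath(42.0, &r));  // 1: exact
      std::printf("%d\n", TruncateFastPath(42.5, &r));  // 0: slow path
    }
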
@@ -2538,7 +2610,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register input_high,
Register input_low) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@@ -2577,7 +2649,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2695,7 +2767,8 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(target,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, FieldMemOperand(target,
@@ -2861,32 +2934,44 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ ldr(scratch,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ldr(ip, FieldMemOperand(scratch, offset));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ ldr(map_in_out, FieldMemOperand(scratch, offset));
}
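
Instead of one context slot per elements kind, the cached Array maps now live in a single fixed array stored at Context::JS_ARRAY_MAPS_INDEX, and each kind indexes into it; hence the explicit offset computation. The arithmetic, spelled out (constants assumed for 32-bit ARM, for illustration only):

    #include <cstddef>

    const size_t kPointerSize = 4;
    const size_t kFixedArrayHeaderSize = 8;  // stand-in for FixedArrayBase::kHeaderSize

    // Mirrors the computation in the hunk: element i of the JS_ARRAY_MAPS
    // array lives at header + i * pointer. The -1 heap-object tag is folded
    // in by FieldMemOperand, not here.
    size_t ArrayMapOffset(int elements_kind) {
      return elements_kind * kPointerSize + kFixedArrayHeaderSize;
    }

    int main() {
      return ArrayMapOffset(2) == 16 ? 0 : 1;  // FAST_ELEMENTS == 2 -> 8 + 2*4
    }
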
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
+ Register function_in, Register scratch,
+ Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
+ ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ kind,
+ map_out,
+ scratch,
+ &done);
+ } else if (can_have_holes) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@@ -2897,11 +2982,12 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
ldr(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -2981,38 +3067,46 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(eq, "Operand is not smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(eq, "Operand is not smi");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(lo, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a string");
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lo, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- CompareRoot(src, root_value_index);
- Assert(eq, message);
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ CompareRoot(src, root_value_index);
+ Check(eq, message);
+ }
}
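
The renamed helpers change one thing: the entire check is emitted only under emit_debug_code(), so release builds get no code at all, where the old Abort* variants always emitted the test. The guard shape, reduced to its essentials (Masm is a stand-in):

    struct Masm {
      bool emit_debug_code;               // driven by --debug-code
      int emitted_instructions = 0;
      void EmitCheck(const char* /*msg*/) { emitted_instructions += 2; }
    };

    // New style: nothing is emitted unless debug code is requested.
    void AssertNotSmi(Masm* masm) {
      if (masm->emit_debug_code) {
        masm->EmitCheck("Operand is a smi");  // tst + Check(ne, ...)
      }
    }

    int main() {
      Masm release{false}, debug{true};
      AssertNotSmi(&release);
      AssertNotSmi(&debug);
      return release.emitted_instructions;  // 0: release pays nothing
    }
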
@@ -3070,7 +3164,8 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required) {
+ Label* gc_required,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3078,11 +3173,16 @@ void MacroAssembler::AllocateHeapNumber(Register result,
scratch1,
scratch2,
gc_required,
- TAG_OBJECT);
+ tagging_mode == TAG_RESULT ? TAG_OBJECT :
+ NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
@@ -3153,17 +3253,17 @@ void MacroAssembler::CopyBytes(Register src,
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-#if CAN_USE_UNALIGNED_ACCESSES
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-#else
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
-#endif
+ if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+ } else {
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ }
sub(length, length, Operand(kPointerSize));
b(&word_loop);
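
CopyBytes moves the unaligned-store decision from preprocessor time (#if CAN_USE_UNALIGNED_ACCESSES) to run time, so one binary can serve cores with and without unaligned support. The two store shapes, modelled in C++:

    #include <cstdint>
    #include <cstring>

    // Models the word-store half of the copy loop above. The byte loop
    // stores the LSB first, matching the little-endian strb/LSR sequence.
    void StoreWord(uint8_t* dst, uint32_t word, bool unaligned_ok) {
      if (unaligned_ok) {
        std::memcpy(dst, &word, sizeof(word));  // one str, any alignment
      } else {
        for (int i = 0; i < 4; i++) {           // four strb + LSR #8 steps
          dst[i] = static_cast<uint8_t>(word >> (8 * i));
        }
      }
    }

    int main() {
      uint8_t buf[4];
      StoreWord(buf, 0x04030201u, false);
      return buf[0] == 0x01 ? 0 : 1;  // LSB first, as strb/LSR emits
    }
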
@@ -3313,6 +3413,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@@ -3323,6 +3424,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@@ -3341,6 +3443,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);
@@ -3442,7 +3545,7 @@ void MacroAssembler::CheckPageFlag(
int mask,
Condition cc,
Label* condition_met) {
- and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
@@ -3591,7 +3694,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
@@ -3637,7 +3740,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// Double value is >= 255, return 255.
bind(&above_zero);
- Vmov(temp_double_reg, 255.0);
+ Vmov(temp_double_reg, 255.0, result_reg);
VFPCompareAndSetFlags(input_reg, temp_double_reg);
b(le, &in_bounds);
mov(result_reg, Operand(255));
@@ -3645,67 +3748,72 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// In 0-255 range, round and truncate.
bind(&in_bounds);
- Vmov(temp_double_reg, 0.5);
- vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
- vmov(result_reg, temp_double_reg.low());
+ // Save FPSCR.
+ vmrs(ip);
+ // Set rounding mode to round to the nearest integer by clearing bits[23:22].
+ bic(result_reg, ip, Operand(kVFPRoundingModeMask));
+ vmsr(result_reg);
+ vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, input_reg.low());
+ // Restore FPSCR.
+ vmsr(ip);
bind(&done);
}
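
The new in-bounds path no longer adds 0.5 and truncates; it temporarily rewrites the FPSCR rounding mode (clearing bits [23:22] selects round-to-nearest) for a single vcvt, then restores the caller's mode. The same dance via the C++11 floating-point environment:

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    // Mirrors the sequence above: save FPSCR, force round-to-nearest,
    // convert, restore. (Strict conformance may require -frounding-math or
    // #pragma STDC FENV_ACCESS ON.)
    int RoundToNearestInt(double v) {
      int old_mode = std::fegetround();                  // vmrs ip
      std::fesetround(FE_TONEAREST);                     // vmsr, mode bits cleared
      int result = static_cast<int>(std::nearbyint(v));  // vcvt, kFPSCRRounding
      std::fesetround(old_mode);                         // vmsr ip
      return result;
    }

    int main() {
      std::printf("%d\n", RoundToNearestInt(254.7));  // 255
    }
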
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- ldr(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
- mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
- bind(&not_smi);
+ ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}
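
EnumLength reads Map::kBitField3Offset, which holds a Smi, and masks it with Smi::FromInt(Map::EnumLengthBits::kMask); because the field sits at shift 0, the and_ leaves a valid Smi in dst with no untagging needed. In plain arithmetic, assuming one-bit Smi tagging as on ARM:

    #include <cstdint>

    // Assumed encoding for illustration: smi value v is stored as (v << 1).
    int32_t SmiFromInt(int32_t v) { return v << 1; }

    // EnumLengthBits::kShift == 0, so masking the smi-tagged bit_field3 with
    // the smi-tagged mask extracts the field while keeping the result a smi.
    int32_t EnumLength(int32_t bit_field3_as_smi, int32_t enum_length_mask) {
      return bit_field3_as_smi & SmiFromInt(enum_length_mask);
    }

    int main() {
      const int kEnumLengthMask = 0x7ff;             // illustrative width
      int32_t bf3 = SmiFromInt(5 | (0x123 << 11));   // length 5 + other bits
      return EnumLength(bf3, kEnumLengthMask) == SmiFromInt(5) ? 0 : 1;
    }
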
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(r1, r0);
- bind(&next);
+ Label next, start;
+ mov(r2, r0);
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- cmp(r2, empty_fixed_array_value);
- b(ne, call_runtime);
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(r3, call_runtime);
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ b(eq, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(r3, call_runtime);
+ jmp(&start);
+
+ bind(&next);
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmp(r1, r0);
- b(eq, &check_prototype);
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmp(r3, empty_fixed_array_value);
+ EnumLength(r3, r1);
+ cmp(r3, Operand(Smi::FromInt(0)));
+ b(ne, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register r2 contains the current JS
+ // object we've reached through the prototype chain.
+ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
+ cmp(r2, empty_fixed_array_value);
b(ne, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- cmp(r1, null_value);
+ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+ cmp(r2, null_value);
b(ne, &next);
}
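
Rewritten, the check walks the prototype chain once: the receiver's map must have a valid (initialized) enum cache, every other object on the chain must have an empty one, and no object may carry elements. Its logic in plain C++ (fields and the marker constant are stand-ins for the map/object state the assembly reads):

    struct Obj {
      int enum_length;        // Map::EnumLengthBits; marker when uninitialized
      bool has_elements;      // elements() != empty_fixed_array
      const Obj* prototype;   // null models the null_value terminator
    };

    const int kInvalidEnumCache = -1;  // stand-in for Map::kInvalidEnumCache

    bool CanUseEnumCache(const Obj* receiver) {
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o->enum_length == kInvalidEnumCache) return false;   // call_runtime
        if (o != receiver && o->enum_length != 0) return false;  // stale cache
        if (o->has_elements) return false;                       // call_runtime
      }
      return true;
    }

    int main() {
      Obj proto{0, false, nullptr};
      Obj receiver{2, false, &proto};
      return CanUseEnumCache(&receiver) ? 0 : 1;  // 0: cache usable
    }
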
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
index 5a5469f..0ff8579 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
@@ -68,6 +68,13 @@ enum AllocationFlags {
SIZE_IN_WORDS = 1 << 2
};
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag the result.
+ DONT_TAG_RESULT
+};
// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
@@ -95,6 +102,11 @@ bool AreAliased(Register reg1,
#endif
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -110,18 +122,22 @@ class MacroAssembler: public Assembler {
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode,
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al);
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -153,7 +169,7 @@ class MacroAssembler: public Assembler {
int lsb,
int width,
Condition cond = al);
- void Bfc(Register dst, int lsb, int width, Condition cond = al);
+ void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
void Usat(Register dst, int satpos, const Operand& src,
Condition cond = al);
@@ -482,6 +498,7 @@ class MacroAssembler: public Assembler {
void Vmov(const DwVfpRegister dst,
const double imm,
+ const Register scratch = no_reg,
const Condition cond = al);
// Enter exit frame.
@@ -499,8 +516,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -512,7 +529,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out);
+ Register map_out,
+ bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
@@ -728,7 +746,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
@@ -802,9 +821,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
@@ -813,6 +832,7 @@ class MacroAssembler: public Assembler {
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
+ // All registers below this point are overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
@@ -830,6 +850,13 @@ class MacroAssembler: public Assembler {
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
@@ -866,12 +893,15 @@ class MacroAssembler: public Assembler {
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
+ // Returns a condition that will be enabled if the object was a string
+ // and the passed-in condition held. If the passed-in condition failed,
+ // the flags remain unchanged.
Condition IsObjectStringType(Register obj,
- Register type) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- tst(type, Operand(kIsNotStringMask));
+ Register type,
+ Condition cond = al) {
+ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
+ ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
+ tst(type, Operand(kIsNotStringMask), cond);
ASSERT_EQ(0, kStringTag);
return eq;
}
@@ -928,21 +958,22 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode.
+ // Truncates a double using a specific rounding mode, and writes the value
+ // to the result register.
// Clears the z flag (ne condition) if an overflow occurs.
- // If exact_conversion is true, the z flag is also cleared if the conversion
- // was inexact, i.e. if the double value could not be converted exactly
- // to a 32bit integer.
+ // If kCheckForInexactConversion is passed, the z flag is also cleared if the
+ // conversion was inexact, i.e. if the double value could not be converted
+ // exactly to a 32-bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
+ Register result,
DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
+ Register scratch,
+ DwVfpRegister double_scratch,
CheckForInexactConversion check
= kDontCheckForInexactConversion);
// Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the singed 32bit
+ // This will truncate a floating-point value outside of the signed 32-bit
// integer range to a 32-bit signed integer.
// Expects the double value loaded in input_high and input_low.
// Exits with the answer in 'result'.
@@ -1174,7 +1205,7 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
- // Jump the register contains a smi.
+ // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
@@ -1189,17 +1220,18 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities
@@ -1261,6 +1293,16 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> shift) << kSmiTagSize;
+ mov(reg, Operand(reg, LSR, shift));
+ and_(reg, reg, Operand(mask));
+ }
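
DecodeField shifts the smi-tagged word right by the field's untagged shift and masks with the field mask re-tagged by kSmiTagSize, so the decoded value comes out still smi-tagged. Numerically, with kSmiTagSize == 1:

    #include <cstdint>

    // Mirrors DecodeField<Field>: 'word' is a smi-tagged bit field (payload
    // in bits [1..31]); the result is the selected field, still smi-tagged.
    uint32_t DecodeField(uint32_t word, int shift, uint32_t untagged_mask) {
      const int kSmiTagSize = 1;
      uint32_t mask = (untagged_mask >> shift) << kSmiTagSize;
      return (word >> shift) & mask;  // mov(reg, LSR, shift); and_(reg, mask)
    }

    int main() {
      // Field at untagged bits [3..5] with value 6, stored smi-tagged.
      uint32_t word = (6u << 3) << 1;
      return DecodeField(word, 3, 0x7u << 3) == (6u << 1) ? 0 : 1;  // 0
    }
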
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1368,12 +1410,12 @@ inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
+ return ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX);
}
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
index a833624..17b8677 100644
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,45 +43,49 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
+ * - r4 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : points to tip of backtrack stack
+ * - r8 : Points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : points to tip of C stack.
+ * - r13/sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[52] Isolate* isolate (Address of the current isolate)
- * - fp[48] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[44] stack_area_base (High end of the memory area to use as
- * backtracking stack).
+ * - fp[56] Isolate* isolate (address of the current isolate)
+ * - fp[52] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[48] stack_area_base (high end of the memory area to use as
+ * backtracking stack).
+ * - fp[44] capture array size (may fit multiple sets of matches)
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
+ * - fp[32] return address (lr).
+ * - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
- * - fp[-4] end of input (Address of end of string).
- * - fp[-8] start of input (Address of first character in string).
+ * - fp[-4] end of input (address of end of string).
+ * - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] Offset of location before start of input (effectively character
+ * - fp[-20] success counter (only for global regexps to count matches).
+ * - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-24] At start (if 1, we are starting at the start of the
+ * - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
- * - fp[-28] register 0 (Only positions must be stored in the first
+ * - fp[-32] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -115,8 +119,10 @@ namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -197,9 +203,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, &not_at_start);
+ BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -212,9 +218,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
- BranchOrBacktrack(eq, on_not_at_start);
+ BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@@ -432,16 +438,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
}
-void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ ldr(r0, register_location(reg1));
- __ ldr(r1, register_location(reg2));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
__ cmp(current_character(), Operand(c));
@@ -655,6 +651,7 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+ Label return_r0;
  // Finalize code - write the entry point code now that we know how many
  // registers we need.
@@ -678,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -698,13 +696,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_r0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &exit_label_);
+ __ b(ne, &return_r0);
__ bind(&stack_ok);
@@ -725,41 +723,45 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ cmp(r1, Operand(0));
- __ mov(r1, Operand(1), LeaveCC, eq);
- __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
- __ str(r1, MemOperand(frame_pointer(), kAtStart));
+  // Initialize code pointer register.
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ b(ne, &load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'), LeaveCC, eq);
+ __ jmp(&start_regexp);
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
-
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ mov(r2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &init_loop);
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ str(r0, register_location(i));
+ }
+ }
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
- // Load previous char as initial value of current character register.
- Label at_start;
- __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
- __ b(ne, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ mov(current_character(), Operand('\n'));
- __ jmp(&start_label_);
+ __ jmp(&start_label_);
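The preload convention is unchanged in spirit: the current-character register holds the character before the match position, with '\n' standing in at the very start of the input so that line-start assertions hold. As a scalar sketch:

// Sketch of the preload: '\n' at index 0, otherwise the previous character.
char PreloadCurrentCharacter(const char* input, int start_index) {
  return start_index == 0 ? '\n' : input[start_index - 1];
}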
// Exit code:
if (success_label_.is_linked()) {
@@ -786,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in r4 for the zero-length check later.
+ __ mov(r4, r2);
+ }
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@@ -797,10 +803,58 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
- __ mov(r0, Operand(SUCCESS));
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ add(r0, r0, Operand(1));
+ __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ sub(r1, r1, Operand(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmp(r1, Operand(num_saved_registers_));
+ __ b(lt, &return_r0);
+
+ __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
+ __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare r0 to initialize registers with its value in the next run.
+ __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // r4: capture start index
+ __ cmp(current_input_offset(), r4);
+ // Not a zero-length match, restart.
+ __ b(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ cmp(current_input_offset(), Operand(0));
+ __ b(eq, &exit_label_);
+ // Advance current position after a zero-length match.
+ __ add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ b(&load_char_start_regexp);
+ } else {
+ __ mov(r0, Operand(SUCCESS));
+ }
}
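The success path for global regexps now loops back to load_char_start_regexp instead of returning after the first match. A host-side sketch of the driver logic the generated code implements, where match_once is a hypothetical stand-in for one run of the matcher:

#include <cstddef>

// Sketch of the global-match restart loop; 'match_once' stands in for one
// run of the generated matcher and reports the match start and end.
typedef bool (*MatchOnce)(const char* subject, size_t pos,
                          size_t* match_start, size_t* match_end);

int MatchGlobal(MatchOnce match_once, const char* subject, size_t length,
                size_t output_slots, size_t captures) {
  int successes = 0;  // Mirrors the kSuccessfulCaptures frame slot.
  size_t pos = 0;
  while (output_slots >= captures) {  // Room for another set of captures?
    size_t start, end;
    if (!match_once(subject, pos, &start, &end)) break;
    successes++;
    output_slots -= captures;  // The output window advances past the results.
    pos = end;
    if (start == end) {          // Zero-length match.
      if (pos == length) break;  // Already at the end of the input.
      pos++;                     // Step past the empty match to guarantee progress.
    }
  }
  return successes;
}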
+
// Exit and return r0
__ bind(&exit_label_);
+ if (global()) {
+ __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_r0);
  // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -822,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
- __ b(ne, &exit_label_);
+ __ b(ne, &return_r0);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -859,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_r0);
}
CodeDesc code_desc;
@@ -1014,8 +1068,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
-void RegExpMacroAssemblerARM::Succeed() {
+bool RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
+ return global();
}
@@ -1303,20 +1358,26 @@ void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
}
+bool RegExpMacroAssemblerARM::CanReadUnaligned() {
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+
void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r0;
+ // r4 is not being used to store the capture start index at this point.
+ __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r4;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
-#if !V8_TARGET_CAN_READ_UNALIGNED
- ASSERT(characters == 1);
-#endif
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
if (mode_ == ASCII) {
if (characters == 4) {
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
index 14f984f..c45669a 100644
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,17 +35,10 @@ namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM();
- virtual ~RegExpMacroAssemblerARM();
-};
-
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -70,7 +63,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
@@ -113,10 +105,11 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
@@ -137,7 +130,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -149,10 +143,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.cc b/src/3rdparty/v8/src/arm/simulator-arm.cc
index 629c209..bd7f1bd 100644
--- a/src/3rdparty/v8/src/arm/simulator-arm.cc
+++ b/src/3rdparty/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -276,7 +276,7 @@ void ArmDebugger::Debug() {
// make them invisible to all commands.
UndoBreakpoints();
- while (!done) {
+ while (!done && !sim_->has_bad_pc()) {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -945,73 +945,31 @@ unsigned int Simulator::get_s_register(int sreg) const {
}
-void Simulator::set_s_register_from_float(int sreg, const float flt) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the single precision floating point value
- // into the unsigned integer element of vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &flt, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
-}
-
-
-void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the integer value into the unsigned integer element of
- // vfp_register[] given by index=sreg.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &sint, sizeof(vfp_register[0]));
- memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
-}
-
-
-void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- // Read the bits from the double precision floating point value into the two
- // consecutive unsigned integer elements of vfp_register[] given by index
- // 2*sreg and 2*sreg+1.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-}
+template<class InputType, int register_size>
+void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
+ ASSERT(reg_index >= 0);
+ if (register_size == 1) ASSERT(reg_index < num_s_registers);
+ if (register_size == 2) ASSERT(reg_index < num_d_registers);
-
-float Simulator::get_float_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- float sm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
+ char buffer[register_size * sizeof(vfp_register[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
+ memcpy(&vfp_register[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_register[0]));
}
-int Simulator::get_sinteger_from_s_register(int sreg) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
-
- int sm_val = 0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the single precision floating point value and return it.
- char buffer[sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
- memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
- return(sm_val);
-}
+template<class ReturnType, int register_size>
+ReturnType Simulator::GetFromVFPRegister(int reg_index) {
+ ASSERT(reg_index >= 0);
+ if (register_size == 1) ASSERT(reg_index < num_s_registers);
+ if (register_size == 2) ASSERT(reg_index < num_d_registers);
-
-double Simulator::get_double_from_d_register(int dreg) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer vfp_register[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
- return(dm_val);
+ ReturnType value = 0;
+ char buffer[register_size * sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[register_size * reg_index],
+ register_size * sizeof(vfp_register[0]));
+ memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
+ return value;
}
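The six hand-written accessors collapse into two templates; the underlying idiom is a memcpy bit copy, which reinterprets a value's bits without violating aliasing rules. In isolation:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Standalone illustration of the memcpy-based bit copy used by the new
// SetVFPRegister/GetFromVFPRegister templates: one d-register is modelled
// as two 32-bit words, and memcpy moves the raw bits in and out.
int main() {
  uint32_t vfp_register[2];
  double in = 1.5;
  std::memcpy(vfp_register, &in, sizeof in);    // set_d_register_from_double
  double out;
  std::memcpy(&out, vfp_register, sizeof out);  // get_double_from_d_register
  std::printf("%g\n", out);                     // Prints 1.5.
}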
@@ -1108,111 +1066,83 @@ void Simulator::TrashCallerSaveRegisters() {
int Simulator::ReadW(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
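Every memory accessor in the simulator now uses the same runtime gate instead of the old compile-time #if. The shape of the check, extracted (the flag name follows the code above):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Extracted shape of the new alignment gate.
bool FLAG_enable_unaligned_accesses = true;

int32_t ReadWord(intptr_t addr) {
  if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
    return *reinterpret_cast<int32_t*>(addr);  // Allowed or aligned.
  }
  std::fprintf(stderr, "Unaligned read at 0x%08lx\n",
               static_cast<unsigned long>(addr));
  std::abort();  // Stands in for UNIMPLEMENTED().
}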
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
@@ -1241,37 +1171,26 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x\n", addr);
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
-#endif
}
@@ -1468,7 +1387,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- UNIMPLEMENTED();
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
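The rotate-right case was previously UNIMPLEMENTED in the simulator; as a plain function, the new logic reads:

#include <cstdint>

// Rotate-right with carry-out, matching the case above: a zero amount
// leaves the carry flag untouched, otherwise the carry is the new bit 31.
uint32_t RotateRight(uint32_t value, int amount, bool old_carry, bool* carry) {
  if (amount == 0) {
    *carry = old_carry;
    return value;
  }
  uint32_t result = (value >> amount) | (value << (32 - amount));
  *carry = (result >> 31) != 0;
  return result;
}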
@@ -1540,7 +1466,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- UNIMPLEMENTED();
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
+ uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
+ result = right | left;
+ *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
+ }
break;
}
@@ -2028,11 +1961,23 @@ void Simulator::DecodeType01(Instruction* instr) {
SetNZFlags(alu_out);
}
} else {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ int rd = instr->RdValue();
+ int32_t acc_value = get_register(rd);
+ if (instr->Bit(22) == 0) {
+ // The MLA instruction description (A 4.1.28) refers to the order
+ // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
+ // Rn field to encode the Rd register and the Rd field to encode
+ // the Rn register.
+ // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value + mul_out;
+ set_register(rn, result);
+ } else {
+ // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
+ int32_t mul_out = rm_val * rs_val;
+ int32_t result = acc_value - mul_out;
+ set_register(rn, result);
+ }
}
} else {
// The signed/long multiply instructions use the terms RdHi and RdLo
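Bit 22 now selects between multiply-accumulate and the newly handled multiply-subtract. Stripped of the operand decoding, the two operations are simply (32-bit wraparound assumed, as in the simulator's registers):

#include <cstdint>

// MLA: rd = acc + rm * rs;  MLS: rd = acc - rm * rs.
int32_t Mla(int32_t rm, int32_t rs, int32_t acc) { return acc + rm * rs; }
int32_t Mls(int32_t rm, int32_t rs, int32_t acc) { return acc - rm * rs; }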
@@ -2252,6 +2197,8 @@ void Simulator::DecodeType01(Instruction* instr) {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ // NOP.
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -2408,7 +2355,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "cmn'cond 'rn, 'imm");
alu_out = rn_val + shifter_operand;
SetNZFlags(alu_out);
- SetCFlag(!CarryFrom(rn_val, shifter_operand));
+ SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
} else {
// Other instructions matching this pattern are handled in the
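The CMN fix is worth a second look: CMN sets C to the carry out of the addition itself, whereas CMP sets C to the inverted borrow, so the extra negation was wrong here. The flag definition, as a function:

#include <cstdint>

// C flag after CMN rn, op2: the carry out of the 32-bit addition.
bool CarryFromAdd(uint32_t a, uint32_t b) {
  return static_cast<uint64_t>(a) + b > 0xFFFFFFFFu;
}
// Example: a = 1, b = 0xFFFFFFFF wraps, so CMN sets C; the old code cleared it.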
@@ -2588,6 +2535,25 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
}
case db_x: {
+ if (FLAG_enable_sudiv) {
+ if (!instr->HasW()) {
+ if (instr->Bits(5, 4) == 0x1) {
+ if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+ // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
+            // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs");
+ int rm = instr->RmValue();
+ int32_t rm_val = get_register(rm);
+ int rs = instr->RsValue();
+ int32_t rs_val = get_register(rs);
+ int32_t ret_val = 0;
+ ASSERT(rs_val != 0);
+ ret_val = rm_val/rs_val;
+ set_register(rn, ret_val);
+ return;
+ }
+ }
+ }
+ }
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
addr = rn_val - shifter_operand;
if (instr->HasW()) {
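Behind the FLAG_enable_sudiv guard the simulator now executes SDIV directly. The simulated arithmetic is plain signed division; the ASSERT excludes a zero divisor (hardware SDIV by zero yields zero unless trapping is configured), so this is a sketch under that assumption:

#include <cassert>
#include <cstdint>

// Simulated SDIV, matching the hunk above: rn = rm / rs, divisor non-zero.
int32_t SimulateSdiv(int32_t rm, int32_t rs) {
  assert(rs != 0);  // The simulator ASSERTs rather than modelling x/0 == 0.
  return rm / rs;
}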
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.h b/src/3rdparty/v8/src/arm/simulator-arm.h
index 585f1e0..abc91bb 100644
--- a/src/3rdparty/v8/src/arm/simulator-arm.h
+++ b/src/3rdparty/v8/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,16 +49,16 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
+ void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -163,12 +163,30 @@ class Simulator {
// Support for VFP.
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
- void set_d_register_from_double(int dreg, const double& dbl);
- double get_double_from_d_register(int dreg);
- void set_s_register_from_float(int sreg, const float dbl);
- float get_float_from_s_register(int sreg);
- void set_s_register_from_sinteger(int reg, const int value);
- int get_sinteger_from_s_register(int reg);
+
+ void set_d_register_from_double(int dreg, const double& dbl) {
+ SetVFPRegister<double, 2>(dreg, dbl);
+ }
+
+ double get_double_from_d_register(int dreg) {
+ return GetFromVFPRegister<double, 2>(dreg);
+ }
+
+ void set_s_register_from_float(int sreg, const float flt) {
+ SetVFPRegister<float, 1>(sreg, flt);
+ }
+
+ float get_float_from_s_register(int sreg) {
+ return GetFromVFPRegister<float, 1>(sreg);
+ }
+
+ void set_s_register_from_sinteger(int sreg, const int sint) {
+ SetVFPRegister<int, 1>(sreg, sint);
+ }
+
+ int get_sinteger_from_s_register(int sreg) {
+ return GetFromVFPRegister<int, 1>(sreg);
+ }
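Call sites are unaffected by the refactoring; a usage sketch, assuming a Simulator* obtained from Simulator::current():

// Usage sketch: the inline wrappers keep the old API while sharing the
// templated implementation underneath.
void RoundTrip(Simulator* sim) {
  sim->set_d_register_from_double(0, 3.14);       // writes d0
  double d = sim->get_double_from_d_register(0);  // reads 3.14 back
  sim->set_s_register_from_sinteger(1, 42);       // writes s1
  int i = sim->get_sinteger_from_s_register(1);   // reads 42 back
  (void)d; (void)i;
}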
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@@ -332,6 +350,12 @@ class Simulator {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
+ template<class ReturnType, int register_size>
+ ReturnType GetFromVFPRegister(int reg_index);
+
+ template<class InputType, int register_size>
+ void SetVFPRegister(int reg_index, const InputType& value);
+
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
@@ -401,9 +425,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
index 49c0982..9fc39d4 100644
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
@@ -283,11 +283,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
__ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ ldr(prototype,
@@ -304,13 +305,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, isolate->global());
+ __ ldr(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ Move(ip, isolate->global_object());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -435,22 +437,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+      // We need an extra register, so push name_reg and reuse it below.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
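The new writability check walks the prototype chain to find the object that must be validated when the lookup found nothing. The holder search, in plain terms (stand-in types, not the real V8 classes):

#include <cstddef>

// Sketch of the holder search above: walk to the last JSObject on the
// prototype chain and validate prototypes against that holder.
struct Obj {
  Obj* prototype;  // NULL-terminated chain in this sketch.
  bool is_js_object;
};

Obj* FindTopHolder(Obj* object) {
  Obj* holder = object;
  while (holder->prototype != NULL && holder->prototype->is_js_object) {
    holder = holder->prototype;
  }
  return holder;
}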
@@ -473,10 +512,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(ip, Operand(transition));
- __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ // Update the map of the object.
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -498,15 +547,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch, offset));
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(r0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(r0, &exit);
@@ -514,7 +564,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -938,8 +988,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
@@ -1182,6 +1232,45 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ cmp(scratch2, Operand(callback));
+ __ b(ne, miss);
+}
+
+
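For holders whose properties live in a dictionary rather than in fast mode, the stub must confirm at runtime that the property still maps to the expected callback. Schematically, with hash probing elided and a linear scan standing in for GeneratePositiveLookup:

#include <cstddef>

// Schematic dictionary check: find the property, then verify its value is
// the expected AccessorInfo; anything else must take the miss path.
struct Entry { const char* key; const void* value; };

bool ValueMatchesCallback(const Entry* dict, size_t n,
                          const char* name, const void* callback) {
  for (size_t i = 0; i < n; i++) {
    if (dict[i].key == name) {           // Probe located the property.
      return dict[i].value == callback;  // Must still be the callback.
    }
  }
  return false;  // Not found: miss.
}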
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1189,6 +1278,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1199,6 +1289,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
@@ -1255,12 +1350,13 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -1328,7 +1424,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
@@ -1477,7 +1573,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1581,16 +1677,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r7, &call_builtin);
    // r1: receiver
// r3: map
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ Label try_holey_map;
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
+ &try_holey_map);
+ __ mov(r2, receiver);
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+ __ jmp(&fast_object);
+
+ __ bind(&try_holey_map);
+ __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ r3,
+ r7,
&call_builtin);
__ mov(r2, receiver);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -2009,7 +2118,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2027,11 +2136,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
- CpuFeatures::Scope scope_vfp3(VFP3);
+ CpuFeatures::Scope scope_vfp2(VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2155,7 +2264,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2254,7 +2363,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2471,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2529,7 +2638,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2545,20 +2654,29 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// -----------------------------------
Label miss;
- GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r1, r2, r3, r4,
+ &miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2566,19 +2684,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- lr : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(r1); // receiver
__ mov(ip, Operand(callback)); // callback info
@@ -2596,7 +2707,80 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(r0);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ Push(r1, r0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r0);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
+
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2641,7 +2825,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2687,7 +2871,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2722,7 +2906,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2742,7 +2926,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2757,13 +2941,76 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- lr : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(r0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r0, &miss);
+ CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
+
+ GenerateLoadViaGetter(masm(), getter);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2783,7 +3030,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2805,7 +3052,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2847,7 +3094,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2870,7 +3117,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2890,12 +3137,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(name));
__ b(ne, &miss);
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2920,7 +3167,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2946,7 +3193,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2967,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2993,7 +3240,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3018,7 +3265,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3038,7 +3285,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3066,7 +3313,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3091,7 +3338,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// r3 is used as scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r2, r1, r3, r4,
+ &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3099,7 +3352,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -3123,7 +3378,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3162,7 +3417,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3212,7 +3467,13 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// r1: constructor function
// r2: initial map
// r7: undefined
+ ASSERT(function->has_initial_map());
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+#ifdef DEBUG
+ int instance_size = function->initial_map()->instance_size();
+ __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
+ __ Check(eq, "Instance size of initial map changed.");
+#endif
__ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3270,7 +3531,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3372,8 +3632,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3388,9 +3651,10 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
Label* fail) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
@@ -3404,13 +3668,12 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- double_scratch0.low(),
- double_scratch0,
scratch0,
+ double_scratch0,
scratch1,
+ double_scratch1,
kCheckForInexactConversion);
__ b(ne, fail);
- __ vmov(scratch0, double_scratch0.low());
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
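The revised key check accepts a heap-number key only when it truncates to an integer exactly and fits a smi. The equivalent scalar test, assuming the 31-bit smi range used on 32-bit ARM:

#include <cmath>
#include <cstdint>

// Scalar version of the smi key check: exact integral value within the
// 31-bit smi range, matching the kRoundToZero + inexact-conversion test.
bool KeyFitsSmi(double key, int32_t* out) {
  double truncated = std::trunc(key);  // kRoundToZero
  if (truncated != key) return false;  // Inexact conversion: bail out.
  if (truncated < -(1 << 30) || truncated >= (1 << 30)) return false;
  *out = static_cast<int32_t>(truncated);
  return true;
}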
@@ -3438,7 +3701,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r3: elements array
@@ -3476,8 +3739,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
@@ -3485,8 +3748,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0);
} else {
@@ -3497,8 +3760,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3526,22 +3792,28 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
__ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ add(r0, r5, Operand(kHeapObjectTag));
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ vstr(d0, r5, HeapNumber::kValueOffset);
__ Ret();
} else {
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
Register dst1 = r1;
Register dst2 = r3;
FloatingPointHelper::Destination dest =
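The DONT_TAG_RESULT variants introduced in this hunk hand back the untagged allocation address, so the vstr can address the payload directly and the tag is applied once to the result register, where the tagged path needed a sub before every store. A sketch of the address arithmetic, with the tag and payload offset values assumed:

#include <cstdint>

const uintptr_t kHeapObjectTag = 1;  // assumed: heap pointers carry a low-bit tag
const uintptr_t kValueOffset = 4;    // assumed offset of the double payload

// TAG_RESULT path: the tag must be stripped before every payload store.
uintptr_t PayloadFromTagged(uintptr_t tagged) {
  return tagged - kHeapObjectTag + kValueOffset;
}

// DONT_TAG_RESULT path: store through the raw pointer, tag once at the end.
uintptr_t PayloadFromRaw(uintptr_t raw) { return raw + kValueOffset; }
uintptr_t TagResult(uintptr_t raw) { return raw + kHeapObjectTag; }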
@@ -3562,8 +3834,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
@@ -3577,13 +3849,12 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3615,7 +3886,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -3626,25 +3897,24 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -3694,24 +3964,23 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
@@ -3769,7 +4038,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3820,7 +4089,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@@ -3830,7 +4099,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vstr(d0, r3, 0);
} else {
__ str(r6, MemOperand(r3, 0));
@@ -3838,8 +4107,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3862,8 +4134,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
@@ -3902,8 +4174,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4042,8 +4317,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4095,7 +4373,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -4147,7 +4425,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(elements_reg,
@@ -4169,7 +4447,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
+ heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
@@ -4223,9 +4501,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
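With the holey variants added, a single equality test no longer covers all smi kinds, hence the switch to a predicate. Its likely shape, with enum values assumed for illustration:

enum ElementsKind {
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_HOLEY_DOUBLE_ELEMENTS
};

inline bool IsFastSmiElementsKind(ElementsKind kind) {
  return kind == FAST_SMI_ELEMENTS || kind == FAST_HOLEY_SMI_ELEMENTS;
}

inline bool IsFastObjectElementsKind(ElementsKind kind) {
  return kind == FAST_ELEMENTS || kind == FAST_HOLEY_ELEMENTS;
}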
@@ -4253,7 +4531,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4263,7 +4541,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
- ASSERT(elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4391,7 +4669,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4416,6 +4694,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
+ // All registers after this are overwritten.
elements_reg,
scratch1,
scratch2,
diff --git a/src/3rdparty/v8/src/array.js b/src/3rdparty/v8/src/array.js
index a1cc5b6..37053ce 100644
--- a/src/3rdparty/v8/src/array.js
+++ b/src/3rdparty/v8/src/array.js
@@ -62,7 +62,7 @@ function GetSortedArrayKeys(array, intervals) {
}
}
}
- keys.sort(function(a, b) { return a - b; });
+ %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
return keys;
}
@@ -441,8 +441,8 @@ function ArrayPop() {
}
n--;
var value = this[n];
- this.length = n;
delete this[n];
+ this.length = n;
return value;
}
@@ -581,7 +581,7 @@ function ArrayShift() {
var first = this[0];
- if (IS_ARRAY(this)) {
+ if (IS_ARRAY(this) && !%IsObserved(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@@ -602,7 +602,7 @@ function ArrayUnshift(arg1) { // length == 1
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- if (IS_ARRAY(this)) {
+ if (IS_ARRAY(this) && !%IsObserved(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@@ -649,6 +649,7 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this) &&
+ !%IsObserved(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
@@ -705,7 +706,9 @@ function ArraySplice(start, delete_count) {
var use_simple_splice = true;
- if (IS_ARRAY(this) && num_additional_args !== del_count) {
+ if (IS_ARRAY(this) &&
+ !%IsObserved(this) &&
+ num_additional_args !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@@ -777,78 +780,103 @@ function ArraySort(comparefn) {
}
};
- var QuickSort = function QuickSort(a, from, to) {
- // Insertion sort is faster for short arrays.
- if (to - from <= 10) {
- InsertionSort(a, from, to);
- return;
+ var GetThirdIndex = function(a, from, to) {
+ var t_array = [];
+ // Use both 'from' and 'to' to determine the pivot candidates.
+ var increment = 200 + ((to - from) & 15);
+ for (var i = from + 1; i < to - 1; i += increment) {
+ t_array.push([i, a[i]]);
}
- // Find a pivot as the median of first, last and middle element.
- var v0 = a[from];
- var v1 = a[to - 1];
- var middle_index = from + ((to - from) >> 1);
- var v2 = a[middle_index];
- var c01 = %_CallFunction(receiver, v0, v1, comparefn);
- if (c01 > 0) {
- // v1 < v0, so swap them.
- var tmp = v0;
- v0 = v1;
- v1 = tmp;
- } // v0 <= v1.
- var c02 = %_CallFunction(receiver, v0, v2, comparefn);
- if (c02 >= 0) {
- // v2 <= v0 <= v1.
- var tmp = v0;
- v0 = v2;
- v2 = v1;
- v1 = tmp;
- } else {
- // v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(receiver, v1, v2, comparefn);
- if (c12 > 0) {
- // v0 <= v2 < v1
- var tmp = v1;
- v1 = v2;
- v2 = tmp;
+ t_array.sort(function(a, b) {
+ return %_CallFunction(receiver, a[1], b[1], comparefn) } );
+ var third_index = t_array[t_array.length >> 1][0];
+ return third_index;
+ }
+
+ var QuickSort = function QuickSort(a, from, to) {
+ var third_index = 0;
+ while (true) {
+ // Insertion sort is faster for short arrays.
+ if (to - from <= 10) {
+ InsertionSort(a, from, to);
+ return;
}
- }
- // v0 <= v1 <= v2
- a[from] = v0;
- a[to - 1] = v2;
- var pivot = v1;
- var low_end = from + 1; // Upper bound of elements lower than pivot.
- var high_start = to - 1; // Lower bound of elements greater than pivot.
- a[middle_index] = a[low_end];
- a[low_end] = pivot;
-
- // From low_end to i are elements equal to pivot.
- // From i to high_start are elements that haven't been compared yet.
- partition: for (var i = low_end + 1; i < high_start; i++) {
- var element = a[i];
- var order = %_CallFunction(receiver, element, pivot, comparefn);
- if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
- low_end++;
- } else if (order > 0) {
- do {
- high_start--;
- if (high_start == i) break partition;
- var top_elem = a[high_start];
- order = %_CallFunction(receiver, top_elem, pivot, comparefn);
- } while (order > 0);
- a[i] = a[high_start];
- a[high_start] = element;
+ if (to - from > 1000) {
+ third_index = GetThirdIndex(a, from, to);
+ } else {
+ third_index = from + ((to - from) >> 1);
+ }
+ // Find a pivot as the median of first, last and middle element.
+ var v0 = a[from];
+ var v1 = a[to - 1];
+ var v2 = a[third_index];
+ var c01 = %_CallFunction(receiver, v0, v1, comparefn);
+ if (c01 > 0) {
+ // v1 < v0, so swap them.
+ var tmp = v0;
+ v0 = v1;
+ v1 = tmp;
+ } // v0 <= v1.
+ var c02 = %_CallFunction(receiver, v0, v2, comparefn);
+ if (c02 >= 0) {
+ // v2 <= v0 <= v1.
+ var tmp = v0;
+ v0 = v2;
+ v2 = v1;
+ v1 = tmp;
+ } else {
+ // v0 <= v1 && v0 < v2
+ var c12 = %_CallFunction(receiver, v1, v2, comparefn);
+ if (c12 > 0) {
+ // v0 <= v2 < v1
+ var tmp = v1;
+ v1 = v2;
+ v2 = tmp;
+ }
+ }
+ // v0 <= v1 <= v2
+ a[from] = v0;
+ a[to - 1] = v2;
+ var pivot = v1;
+ var low_end = from + 1; // Upper bound of elements lower than pivot.
+ var high_start = to - 1; // Lower bound of elements greater than pivot.
+ a[third_index] = a[low_end];
+ a[low_end] = pivot;
+
+ // From low_end to i are elements equal to pivot.
+ // From i to high_start are elements that haven't been compared yet.
+ partition: for (var i = low_end + 1; i < high_start; i++) {
+ var element = a[i];
+ var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
- element = a[i];
a[i] = a[low_end];
a[low_end] = element;
low_end++;
+ } else if (order > 0) {
+ do {
+ high_start--;
+ if (high_start == i) break partition;
+ var top_elem = a[high_start];
+ order = %_CallFunction(receiver, top_elem, pivot, comparefn);
+ } while (order > 0);
+ a[i] = a[high_start];
+ a[high_start] = element;
+ if (order < 0) {
+ element = a[i];
+ a[i] = a[low_end];
+ a[low_end] = element;
+ low_end++;
+ }
}
}
+ if (to - high_start < low_end - from) {
+ QuickSort(a, high_start, to);
+ to = low_end;
+ } else {
+ QuickSort(a, from, low_end);
+ from = high_start;
+ }
}
- QuickSort(a, from, low_end);
- QuickSort(a, high_start, to);
};
// Copy elements in the range 0..length from obj's prototype chain
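The rewrite above turns one of the two recursive calls into iteration: each pass recurses only into the smaller partition and loops on the larger, so stack depth stays O(log n); GetThirdIndex additionally samples pivot candidates once a range exceeds 1000 elements. A compact C++ sketch of the same control flow, a sketch only: partitioning is reduced to two std::partition passes and the sampled pivot selection is elided:

#include <algorithm>
#include <vector>

void QuickSort(std::vector<int>& a, int from, int to) {  // sorts [from, to)
  while (to - from > 10) {
    int pivot = a[from + ((to - from) >> 1)];
    int* base = a.data();
    int low_end = static_cast<int>(
        std::partition(base + from, base + to,
                       [pivot](int x) { return x < pivot; }) - base);
    int high_start = static_cast<int>(
        std::partition(base + low_end, base + to,
                       [pivot](int x) { return x == pivot; }) - base);
    // [from, low_end) < pivot, [low_end, high_start) == pivot,
    // [high_start, to) > pivot; the equal band is never revisited.
    if (to - high_start < low_end - from) {
      QuickSort(a, high_start, to);  // recurse into the smaller side...
      to = low_end;                  // ...and keep looping on the larger one
    } else {
      QuickSort(a, from, low_end);
      from = high_start;
    }
  }
  std::sort(a.begin() + from, a.begin() + to);  // stand-in for the insertion sort
}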
@@ -1524,9 +1552,11 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
+ "indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush)
+ "push", getFunction("push", ArrayPush),
+ "splice", getFunction("splice", ArraySplice)
));
}
diff --git a/src/3rdparty/v8/src/assembler.cc b/src/3rdparty/v8/src/assembler.cc
index 4c1c744..c0867dd 100644
--- a/src/3rdparty/v8/src/assembler.cc
+++ b/src/3rdparty/v8/src/assembler.cc
@@ -108,7 +108,9 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
AssemblerBase::AssemblerBase(Isolate* isolate)
: isolate_(isolate),
- jit_cookie_(0) {
+ jit_cookie_(0),
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
@@ -141,7 +143,7 @@ int Label::pos() const {
// an iteration.
//
// The encoding relies on the fact that there are fewer than 14
-// different non-compactly encoded relocation modes.
+// different relocation modes using standard non-compact encoding.
//
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
@@ -173,7 +175,9 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: not used (would allow one more relocation mode to be added)
+// 1101: constant pool. Used on ARM only for now.
+// The format is: 11 1101 11
+// signed int (size of the constant pool).
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
@@ -194,7 +198,7 @@ int Label::pos() const {
// dropped, and last non-zero chunk tagged with 1.)
-const int kMaxRelocModes = 14;
+const int kMaxStandardNonCompactModes = 14;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
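As the scheme above describes, a non-compact record starts with one prefix byte whose low two bits are 11, followed by the extra tag and an optional top tag. A sketch of that packing, with the helper name hypothetical and the bit widths taken from the scheme:

#include <cstdint>

const int kPrefixTagBits = 2;
const int kExtraTagBits = 4;
const int kExtraTaggedMarker = 3;  // binary 11 in the low bits marks an extra-tagged record

uint8_t PackExtraTag(int extra_tag, int top_tag) {
  return static_cast<uint8_t>((top_tag << (kPrefixTagBits + kExtraTagBits)) |
                              (extra_tag << kPrefixTagBits) |
                              kExtraTaggedMarker);
}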
@@ -228,6 +232,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
+const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 3;
+
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@@ -285,6 +292,15 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
+void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
+ WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = static_cast<byte>(data);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ data = data >> kBitsPerByte;
+ }
+}
+
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@@ -299,9 +315,10 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
+ ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
- ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
- kMaxRelocModes);
+ ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
+ <= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@@ -347,6 +364,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
+ } else if (RelocInfo::IsConstPool(rmode)) {
+ WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+ WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -397,6 +417,15 @@ void RelocIterator::AdvanceReadId() {
}
+void RelocIterator::AdvanceReadConstPoolData() {
+ int x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+ }
+ rinfo_.data_ = x;
+}
+
+
void RelocIterator::AdvanceReadPosition() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
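The two helpers added in this file, WriteExtraTaggedConstPoolData and AdvanceReadConstPoolData, are byte-wise inverses: the writer emits the signed pool size one byte at a time relying on arithmetic right shift, and the reader reassembles it by or-ing shifted bytes. A standalone round-trip sketch, with the buffer direction simplified to forward order:

#include <cassert>
#include <cstdint>

const int kIntSize = 4;
const int kBitsPerByte = 8;

void EncodeInt(uint8_t* buf, int data) {
  for (int i = 0; i < kIntSize; i++) {
    buf[i] = static_cast<uint8_t>(data);
    data = data >> kBitsPerByte;  // arithmetic shift preserves the sign
  }
}

int DecodeInt(const uint8_t* buf) {
  uint32_t x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<uint32_t>(buf[i]) << (i * kBitsPerByte);
  }
  return static_cast<int>(x);
}

// Round trip, including negative values:
//   uint8_t buf[kIntSize];
//   EncodeInt(buf, -12345);
//   assert(DecodeInt(buf) == -12345);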
@@ -500,8 +529,7 @@ void RelocIterator::next() {
ASSERT(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpExtraTag) {
- int top_tag = GetTopTag();
- if (top_tag == kVariableLengthPCJumpTopTag) {
+ if (GetTopTag() == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
@@ -531,6 +559,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
+ } else if ((extra_tag == kConstPoolExtraTag) &&
+ (GetTopTag() == kConstPoolTag)) {
+ if (SetMode(RelocInfo::CONST_POOL)) {
+ AdvanceReadConstPoolData();
+ return;
+ }
+ Advance(kIntSize);
} else {
AdvanceReadPC();
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
@@ -538,6 +573,15 @@ void RelocIterator::next() {
}
}
}
+ if (code_age_sequence_ != NULL) {
+ byte* old_code_age_sequence = code_age_sequence_;
+ code_age_sequence_ = NULL;
+ if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
+ rinfo_.data_ = 0;
+ rinfo_.pc_ = old_code_age_sequence;
+ return;
+ }
+ }
done_ = true;
}
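RelocIterator::next() above drains code_age_sequence_ only after the real record stream is exhausted, and the constructor below arms it just once, so CODE_AGE_SEQUENCE behaves like a single synthetic entry appended to the iteration. The pattern in isolation, with names and the mode value hypothetical:

struct Entry { int mode; const void* pc; };

class Iter {
 public:
  Iter(const Entry* begin, const Entry* end, const void* pseudo)
      : pos_(begin), end_(end), pending_pseudo_(pseudo) {}

  bool Next(Entry* out) {
    if (pos_ != end_) { *out = *pos_++; return true; }
    if (pending_pseudo_ != nullptr) {            // exactly one synthetic entry
      *out = Entry{/* CODE_AGE_SEQUENCE */ -1, pending_pseudo_};
      pending_pseudo_ = nullptr;
      return true;
    }
    return false;                                // iteration done
  }

 private:
  const Entry* pos_;
  const Entry* end_;
  const void* pending_pseudo_;
};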
@@ -553,6 +597,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ byte* sequence = code->FindCodeAgeSequence();
+ if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
+ code_age_sequence_ = sequence;
+ } else {
+ code_age_sequence_ = NULL;
+ }
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -568,6 +618,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -613,11 +664,15 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
+ case RelocInfo::CONST_POOL:
+ return "constant pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
#endif
return "debug break slot";
+ case RelocInfo::CODE_AGE_SEQUENCE:
+ return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@@ -627,43 +682,43 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
void RelocInfo::Print(FILE* out) {
- PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
+ FPrintF(out, "%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
- PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
+ FPrintF(out, " (%s)", reinterpret_cast<char*>(data_));
} else if (rmode_ == EMBEDDED_OBJECT) {
- PrintF(out, " (");
+ FPrintF(out, " (");
target_object()->ShortPrint(out);
- PrintF(out, ")");
+ FPrintF(out, ")");
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder;
- PrintF(out, " (%s) (%p)",
+ FPrintF(out, " (%s) (%p)",
ref_encoder.NameOfAddress(*target_reference_address()),
*target_reference_address());
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
- PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
+ FPrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
target_address());
if (rmode_ == CODE_TARGET_WITH_ID) {
PrintF(" (id=%d)", static_cast<int>(data_));
}
} else if (IsPosition(rmode_)) {
- PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
+ FPrintF(out, " (%" V8_PTR_PREFIX "d)", data());
} else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
Isolate::Current()->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
- PrintF(out, " (deoptimization bailout %d)", id);
+ FPrintF(out, " (deoptimization bailout %d)", id);
}
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
#endif // ENABLE_DISASSEMBLER
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void RelocInfo::Verify() {
switch (rmode_) {
case EMBEDDED_OBJECT:
@@ -683,12 +738,12 @@ void RelocInfo::Verify() {
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
- ASSERT(addr != NULL);
+ CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = HEAP->FindCodeObject(addr);
- ASSERT(found->IsCode());
- ASSERT(code->address() == HeapObject::cast(found)->address());
+ CHECK(found->IsCode());
+ CHECK(code->address() == HeapObject::cast(found)->address());
break;
}
case RUNTIME_ENTRY:
@@ -698,15 +753,19 @@ void RelocInfo::Verify() {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
+ case CONST_POOL:
case DEBUG_BREAK_SLOT:
case NONE:
break;
case NUMBER_OF_MODES:
UNREACHABLE();
break;
+ case CODE_AGE_SEQUENCE:
+ ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
+ break;
}
}
-#endif // DEBUG
+#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
@@ -839,6 +898,13 @@ ExternalReference ExternalReference::get_date_field_function(
}
+ExternalReference ExternalReference::get_make_code_young_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
+}
+
+
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@@ -955,6 +1021,24 @@ ExternalReference ExternalReference::scheduled_exception_address(
}
+ExternalReference ExternalReference::address_of_pending_message_obj(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_message_obj_address());
+}
+
+
+ExternalReference ExternalReference::address_of_has_pending_message(
+ Isolate* isolate) {
+ return ExternalReference(isolate->has_pending_message_address());
+}
+
+
+ExternalReference ExternalReference::address_of_pending_message_script(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_message_script_address());
+}
+
+
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
@@ -1039,7 +1123,7 @@ ExternalReference ExternalReference::re_word_character_map() {
ExternalReference ExternalReference::address_of_static_offsets_vector(
Isolate* isolate) {
return ExternalReference(
- OffsetsVector::static_offsets_vector_address(isolate));
+ reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
@@ -1133,6 +1217,12 @@ ExternalReference ExternalReference::math_log_double_function(
}
+ExternalReference ExternalReference::page_flags(Page* page) {
+ return ExternalReference(reinterpret_cast<Address>(page) +
+ MemoryChunk::kFlagsOffset);
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
diff --git a/src/3rdparty/v8/src/assembler.h b/src/3rdparty/v8/src/assembler.h
index 05fe320..037799d 100644
--- a/src/3rdparty/v8/src/assembler.h
+++ b/src/3rdparty/v8/src/assembler.h
@@ -51,7 +51,6 @@ class ApiFunction;
namespace internal {
struct StatsCounter;
-const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@@ -60,7 +59,13 @@ class AssemblerBase: public Malloced {
explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
- int jit_cookie() { return jit_cookie_; }
+ int jit_cookie() const { return jit_cookie_; }
+
+ bool emit_debug_code() const { return emit_debug_code_; }
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ bool predictable_code_size() const { return predictable_code_size_; }
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
@@ -69,6 +74,28 @@ class AssemblerBase: public Malloced {
private:
Isolate* isolate_;
int jit_cookie_;
+ bool emit_debug_code_;
+ bool predictable_code_size_;
+};
+
+
+// Avoids using instructions that vary in size in unpredictable ways between the
+// snapshot and the running VM.
+class PredictableCodeSizeScope {
+ public:
+ explicit PredictableCodeSizeScope(AssemblerBase* assembler)
+ : assembler_(assembler) {
+ old_value_ = assembler_->predictable_code_size();
+ assembler_->set_predictable_code_size(true);
+ }
+
+ ~PredictableCodeSizeScope() {
+ assembler_->set_predictable_code_size(old_value_);
+ }
+
+ private:
+ AssemblerBase* assembler_;
+ bool old_value_;
};
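PredictableCodeSizeScope is the usual RAII save/set/restore idiom, so the flag is restored even on early returns. A self-contained usage sketch; the stand-in Assembler and the emitting function are hypothetical:

class Assembler {
 public:
  bool predictable_code_size() const { return flag_; }
  void set_predictable_code_size(bool v) { flag_ = v; }
 private:
  bool flag_ = false;
};

class PredictableCodeSizeScope {  // same shape as the class above
 public:
  explicit PredictableCodeSizeScope(Assembler* a)
      : assembler_(a), old_value_(a->predictable_code_size()) {
    assembler_->set_predictable_code_size(true);
  }
  ~PredictableCodeSizeScope() {
    assembler_->set_predictable_code_size(old_value_);
  }
 private:
  Assembler* assembler_;
  bool old_value_;
};

void EmitFixedSizeSequence(Assembler* assembler) {
  PredictableCodeSizeScope scope(assembler);
  // ... emit instructions whose byte length must not vary between the
  // snapshot build and the running VM ...
}  // flag restored here, even on an early return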
@@ -204,14 +231,25 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
+ // Marks a constant pool. Only used on ARM.
+ // It uses a custom noncompact encoding.
+ CONST_POOL,
+
// add more as needed
// Pseudo-types
- NUMBER_OF_MODES, // There are at most 14 modes with noncompact encoding.
+ NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
+ CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
+ // code aging.
+ FIRST_REAL_RELOC_MODE = CODE_TARGET,
+ LAST_REAL_RELOC_MODE = CONST_POOL,
+ FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
+ LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
- LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
+ LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
+ LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
};
@@ -221,6 +259,15 @@ class RelocInfo BASE_EMBEDDED {
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
+ static inline bool IsRealRelocMode(Mode mode) {
+ return mode >= FIRST_REAL_RELOC_MODE &&
+ mode <= LAST_REAL_RELOC_MODE;
+ }
+ static inline bool IsPseudoRelocMode(Mode mode) {
+ ASSERT(!IsRealRelocMode(mode));
+ return mode >= FIRST_PSEUDO_RELOC_MODE &&
+ mode <= LAST_PSEUDO_RELOC_MODE;
+ }
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@@ -240,6 +287,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
}
+ static inline bool IsConstPool(Mode mode) {
+ return mode == CONST_POOL;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -255,6 +305,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
+ static inline bool IsCodeAgeSequence(Mode mode) {
+ return mode == CODE_AGE_SEQUENCE;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -287,7 +340,8 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
-
+ INLINE(Code* code_age_stub());
+ INLINE(void set_code_age_stub(Code* stub));
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@@ -342,8 +396,7 @@ class RelocInfo BASE_EMBEDDED {
static const char* RelocModeName(Mode rmode);
void Print(FILE* out);
#endif // ENABLE_DISASSEMBLER
-#ifdef DEBUG
- // Debugging
+#ifdef VERIFY_HEAP
void Verify();
#endif
@@ -362,19 +415,17 @@ class RelocInfo BASE_EMBEDDED {
Mode rmode_;
intptr_t data_;
Code* host_;
-#ifdef V8_TARGET_ARCH_MIPS
- // Code and Embedded Object pointers in mips are stored split
+ // Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
// routines expect to access these pointers indirectly. The following
- // location provides a place for these pointers to exist natually
+ // location provides a place for these pointers to exist naturally
// when accessed via the Iterator.
Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
- // in mips, but are accessed via indirect pointers. This location
+ // on some platforms, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
// is returned by RelocInfo::target_reference_address().
Address reconstructed_adr_ptr_;
-#endif // V8_TARGET_ARCH_MIPS
friend class RelocIterator;
};
@@ -416,6 +467,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
+ inline void WriteExtraTaggedConstPoolData(int data);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -466,6 +518,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
+ void AdvanceReadConstPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
@@ -481,6 +534,7 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
+ byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@@ -589,6 +643,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
+ static ExternalReference get_make_code_young_function(Isolate* isolate);
+
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
@@ -640,6 +696,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference handle_scope_level_address();
static ExternalReference scheduled_exception_address(Isolate* isolate);
+ static ExternalReference address_of_pending_message_obj(Isolate* isolate);
+ static ExternalReference address_of_has_pending_message(Isolate* isolate);
+ static ExternalReference address_of_pending_message_script(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
@@ -656,6 +715,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
+ static ExternalReference page_flags(Page* page);
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/3rdparty/v8/src/ast.cc b/src/3rdparty/v8/src/ast.cc
index a02cede..3015b1e 100644
--- a/src/3rdparty/v8/src/ast.cc
+++ b/src/3rdparty/v8/src/ast.cc
@@ -85,8 +85,8 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
- int position,
- Interface* interface)
+ Interface* interface,
+ int position)
: Expression(isolate),
name_(name),
var_(NULL),
@@ -125,10 +125,7 @@ Assignment::Assignment(Isolate* isolate,
value_(value),
pos_(pos),
binary_operation_(NULL),
- compound_load_id_(kNoNumber),
assignment_id_(GetNextId(isolate)),
- block_start_(false),
- block_end_(false),
is_monomorphic_(false) { }
@@ -156,6 +153,11 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
+bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
+ return scope()->AllowsLazyCompilationWithoutContext();
+}
+
+
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
@@ -247,8 +249,11 @@ bool IsEqualNumber(void* first, void* second) {
}
-void ObjectLiteral::CalculateEmitStore() {
- ZoneHashMap table(Literal::Match);
+void ObjectLiteral::CalculateEmitStore(Zone* zone) {
+ ZoneAllocationPolicy allocator(zone);
+
+ ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
+ allocator);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
@@ -257,23 +262,23 @@ void ObjectLiteral::CalculateEmitStore() {
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED &&
- table.Lookup(literal, hash, false) != NULL) {
+ table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {
// Add key to the table.
- table.Lookup(literal, hash, true);
+ table.Lookup(literal, hash, true, allocator);
}
}
}
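CalculateEmitStore above walks the properties from last to first, so the occurrence that wins in evaluation order keeps its store and earlier duplicates are suppressed. A sketch of the same pass with std::unordered_set standing in for the zone hash map and the property-kind check omitted:

#include <string>
#include <unordered_set>
#include <vector>

struct Property { std::string key; bool emit_store = true; };

// Last occurrence in source order keeps its store; earlier duplicates are
// marked so no redundant store is emitted for them.
void CalculateEmitStore(std::vector<Property>* properties) {
  std::unordered_set<std::string> seen;
  for (int i = static_cast<int>(properties->size()) - 1; i >= 0; i--) {
    Property& p = (*properties)[i];
    if (!seen.insert(p.key).second) p.emit_store = false;  // duplicate key
  }
}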
-void TargetCollector::AddTarget(Label* target) {
+void TargetCollector::AddTarget(Label* target, Zone* zone) {
// Add the label to the collector, but discard duplicates.
int length = targets_.length();
for (int i = 0; i < length; i++) {
if (targets_[i] == target) return;
}
- targets_.Add(target);
+ targets_.Add(target, zone);
}
@@ -402,7 +407,8 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
// Record type feedback from the oracle in the AST.
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
@@ -426,18 +432,21 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this));
+ receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
+ zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
+ oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
}
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+ TypeFeedbackId id = AssignmentFeedbackId();
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@@ -446,23 +455,26 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
+ oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+ Zone* zone) {
+ TypeFeedbackId id = CountStoreFeedbackId();
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism);
- oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
+ receiver_types_.Add(
+ oracle->StoreMonomorphicReceiverType(id), zone);
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
+ oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
@@ -496,7 +508,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
- type->LookupInDescriptors(NULL, *name, &lookup);
+ type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case CONSTANT_FUNCTION:
@@ -511,11 +523,9 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
case INTERCEPTOR:
// We don't know the target.
return false;
- case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- // Perhaps something interesting is up in the prototype chain...
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -523,6 +533,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+ if (!holder_->HasFastProperties()) return false;
type = Handle<Map>(holder()->map());
}
}
@@ -794,7 +805,7 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// output formats are alike.
class RegExpUnparser: public RegExpVisitor {
public:
- RegExpUnparser();
+ explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
@@ -804,10 +815,11 @@ class RegExpUnparser: public RegExpVisitor {
StringStream* stream() { return &stream_; }
HeapStringAllocator alloc_;
StringStream stream_;
+ Zone* zone_;
};
-RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
+RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
}
@@ -847,9 +859,9 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
if (that->is_negated())
stream()->Add("^");
stream()->Add("[");
- for (int i = 0; i < that->ranges()->length(); i++) {
+ for (int i = 0; i < that->ranges(zone_)->length(); i++) {
if (i > 0) stream()->Add(" ");
- VisitCharacterRange(that->ranges()->at(i));
+ VisitCharacterRange(that->ranges(zone_)->at(i));
}
stream()->Add("]");
return NULL;
@@ -951,8 +963,8 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
}
-SmartArrayPointer<const char> RegExpTree::ToString() {
- RegExpUnparser unparser;
+SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
+ RegExpUnparser unparser(zone);
Accept(&unparser, NULL);
return unparser.ToString();
}
@@ -1029,6 +1041,14 @@ CaseClause::CaseClause(Isolate* isolate,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_CACHE_NODE(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_flag(kDontOptimize); \
+ add_flag(kDontInline); \
+ add_flag(kDontSelfOptimize); \
+ add_flag(kDontCache); \
+ }
REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
@@ -1043,6 +1063,7 @@ REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ObjectLiteral)
+REGULAR_NODE(RegExpLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
@@ -1059,10 +1080,13 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
+// We currently do not optimize any modules. Note in particular that module
+// instance objects associated with ModuleLiterals are allocated during
+// scope resolution, and references to them are embedded into the code.
+// That code may hence neither be cached nor re-compiled.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
-DONT_OPTIMIZE_NODE(ModuleLiteral)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
@@ -1072,15 +1096,16 @@ DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
-DONT_INLINE_NODE(FunctionLiteral)
-DONT_INLINE_NODE(RegExpLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
+DONT_INLINE_NODE(FunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
@@ -1101,6 +1126,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
+#undef DONT_CACHE_NODE
Handle<String> Literal::ToString() {
diff --git a/src/3rdparty/v8/src/ast.h b/src/3rdparty/v8/src/ast.h
index d2785c2..d3f90b2 100644
--- a/src/3rdparty/v8/src/ast.h
+++ b/src/3rdparty/v8/src/ast.h
@@ -37,7 +37,7 @@
#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "token.h"
#include "utils.h"
#include "variables.h"
@@ -158,14 +158,16 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; }
+ virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+ template<class> friend class AstNodeFactory;
enum AstPropertiesFlag {
kDontInline,
kDontOptimize,
kDontSelfOptimize,
- kDontSoftInline
+ kDontSoftInline,
+ kDontCache
};
@@ -194,13 +196,6 @@ class AstNode: public ZoneObject {
};
#undef DECLARE_TYPE_ENUM
- static const int kNoNumber = -1;
- static const int kFunctionEntryId = 2; // Using 0 could disguise errors.
- // This AST id identifies the point after the declarations have been
- // visited. We need it to capture the environment effects of declarations
- // that emit code (function declarations).
- static const int kDeclarationsId = 3;
-
void* operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
@@ -210,7 +205,7 @@ class AstNode: public ZoneObject {
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
- virtual Type node_type() const { return kInvalid; }
+ virtual Type node_type() const = 0;
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -219,9 +214,6 @@ class AstNode: public ZoneObject {
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- virtual Declaration* AsDeclaration() { return NULL; }
- virtual Statement* AsStatement() { return NULL; }
- virtual Expression* AsExpression() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
@@ -238,6 +230,12 @@ class AstNode: public ZoneObject {
return tmp;
}
+ // Some nodes re-use bailout IDs for type feedback.
+ static TypeFeedbackId reuse(BailoutId id) {
+ return TypeFeedbackId(id.ToInt());
+ }
+
+
private:
// Hidden to prevent accidental usage. It would have to load the
// current zone from the TLS.
@@ -251,8 +249,6 @@ class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
- virtual Statement* AsStatement() { return this; }
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -266,17 +262,17 @@ class Statement: public AstNode {
class SmallMapList {
public:
SmallMapList() {}
- explicit SmallMapList(int capacity) : list_(capacity) {}
+ SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
- void Reserve(int capacity) { list_.Reserve(capacity); }
+ void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
void Clear() { list_.Clear(); }
void Sort() { list_.Sort(); }
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
- void Add(Handle<Map> handle) {
- list_.Add(handle.location());
+ void Add(Handle<Map> handle, Zone* zone) {
+ list_.Add(handle.location(), zone);
}
Handle<Map> at(int i) const {
@@ -313,8 +309,6 @@ class Expression: public AstNode {
return 0;
}
- virtual Expression* AsExpression() { return this; }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -355,8 +349,8 @@ class Expression: public AstNode {
return types->at(0);
}
- unsigned id() const { return id_; }
- unsigned test_id() const { return test_id_; }
+ BailoutId id() const { return id_; }
+ TypeFeedbackId test_id() const { return test_id_; }
protected:
explicit Expression(Isolate* isolate)
@@ -364,8 +358,8 @@ class Expression: public AstNode {
test_id_(GetNextId(isolate)) {}
private:
- int id_;
- int test_id_;
+ const BailoutId id_;
+ const TypeFeedbackId test_id_;
};
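The sweep from plain ints to BailoutId and TypeFeedbackId through this header is a strong-typedef move: the two id spaces can no longer be mixed by accident, at no runtime cost, and reuse() above makes the one sanctioned crossover explicit. A minimal sketch of such wrappers; the real classes are defined elsewhere in V8 and carry more named constructors:

class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  int ToInt() const { return id_; }
  static BailoutId None() { return BailoutId(-1); }
  bool IsNone() const { return id_ == -1; }
  bool operator==(const BailoutId& other) const { return id_ == other.id_; }
 private:
  int id_;
};

class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) {}
  int ToInt() const { return id_; }
 private:
  int id_;
};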
@@ -389,9 +383,8 @@ class BreakableStatement: public Statement {
// Testers.
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
- // Bailout support.
- int EntryId() const { return entry_id_; }
- int ExitId() const { return exit_id_; }
+ BailoutId EntryId() const { return entry_id_; }
+ BailoutId ExitId() const { return exit_id_; }
protected:
BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
@@ -407,8 +400,8 @@ class BreakableStatement: public Statement {
ZoneStringList* labels_;
Type type_;
Label break_target_;
- int entry_id_;
- int exit_id_;
+ const BailoutId entry_id_;
+ const BailoutId exit_id_;
};
@@ -416,7 +409,9 @@ class Block: public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
- void AddStatement(Statement* statement) { statements_.Add(statement); }
+ void AddStatement(Statement* statement, Zone* zone) {
+ statements_.Add(statement, zone);
+ }
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
@@ -425,14 +420,13 @@ class Block: public BreakableStatement {
void set_scope(Scope* scope) { scope_ = scope; }
protected:
- template<class> friend class AstNodeFactory;
-
Block(Isolate* isolate,
ZoneStringList* labels,
int capacity,
- bool is_initializer_block)
+ bool is_initializer_block,
+ Zone* zone)
: BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
+ statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
}
@@ -452,8 +446,6 @@ class Declaration: public AstNode {
virtual InitializationFlag initialization() const = 0;
virtual bool IsInlineable() const;
- virtual Declaration* AsDeclaration() { return this; }
-
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
@@ -461,10 +453,7 @@ class Declaration: public AstNode {
: proxy_(proxy),
mode_(mode),
scope_(scope) {
- ASSERT(mode == VAR ||
- mode == CONST ||
- mode == CONST_HARMONY ||
- mode == LET);
+ ASSERT(IsDeclaredVariableMode(mode));
}
private:
@@ -485,8 +474,6 @@ class VariableDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
Scope* scope)
@@ -506,8 +493,6 @@ class FunctionDeclaration: public Declaration {
virtual bool IsInlineable() const;
protected:
- template<class> friend class AstNodeFactory;
-
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
@@ -534,8 +519,6 @@ class ModuleDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
@@ -558,8 +541,6 @@ class ImportDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
ImportDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
@@ -581,25 +562,27 @@ class ExportDeclaration: public Declaration {
}
protected:
- template<class> friend class AstNodeFactory;
-
- ExportDeclaration(VariableProxy* proxy,
- Scope* scope)
- : Declaration(proxy, LET, scope) {
- }
+ ExportDeclaration(VariableProxy* proxy, Scope* scope)
+ : Declaration(proxy, LET, scope) {}
};
class Module: public AstNode {
public:
Interface* interface() const { return interface_; }
+ Block* body() const { return body_; }
protected:
- Module() : interface_(Interface::NewModule()) {}
- explicit Module(Interface* interface) : interface_(interface) {}
+ explicit Module(Zone* zone)
+ : interface_(Interface::NewModule(zone)),
+ body_(NULL) {}
+ explicit Module(Interface* interface, Block* body = NULL)
+ : interface_(interface),
+ body_(body) {}
private:
Interface* interface_;
+ Block* body_;
};
@@ -607,20 +590,8 @@ class ModuleLiteral: public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
- Block* body() const { return body_; }
- Handle<Context> context() const { return context_; }
-
protected:
- template<class> friend class AstNodeFactory;
-
- ModuleLiteral(Block* body, Interface* interface)
- : Module(interface),
- body_(body) {
- }
-
- private:
- Block* body_;
- Handle<Context> context_;
+ ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
};
@@ -631,8 +602,6 @@ class ModuleVariable: public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- template<class> friend class AstNodeFactory;
-
inline explicit ModuleVariable(VariableProxy* proxy);
private:
@@ -648,10 +617,9 @@ class ModulePath: public Module {
Handle<String> name() const { return name_; }
protected:
- template<class> friend class AstNodeFactory;
-
- ModulePath(Module* module, Handle<String> name)
- : module_(module),
+ ModulePath(Module* module, Handle<String> name, Zone* zone)
+ : Module(zone),
+ module_(module),
name_(name) {
}
@@ -668,9 +636,8 @@ class ModuleUrl: public Module {
Handle<String> url() const { return url_; }
protected:
- template<class> friend class AstNodeFactory;
-
- explicit ModuleUrl(Handle<String> url) : url_(url) {
+ ModuleUrl(Handle<String> url, Zone* zone)
+ : Module(zone), url_(url) {
}
private:
@@ -685,10 +652,9 @@ class IterationStatement: public BreakableStatement {
Statement* body() const { return body_; }
- // Bailout support.
- int OsrEntryId() const { return osr_entry_id_; }
- virtual int ContinueId() const = 0;
- virtual int StackCheckId() const = 0;
+ BailoutId OsrEntryId() const { return osr_entry_id_; }
+ virtual BailoutId ContinueId() const = 0;
+ virtual BailoutId StackCheckId() const = 0;
// Code generation
Label* continue_target() { return &continue_target_; }
@@ -707,7 +673,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
- int osr_entry_id_;
+ const BailoutId osr_entry_id_;
};
@@ -727,14 +693,11 @@ class DoWhileStatement: public IterationStatement {
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- virtual int StackCheckId() const { return back_edge_id_; }
- int BackEdgeId() const { return back_edge_id_; }
+ virtual BailoutId ContinueId() const { return continue_id_; }
+ virtual BailoutId StackCheckId() const { return back_edge_id_; }
+ BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
cond_(NULL),
@@ -746,8 +709,8 @@ class DoWhileStatement: public IterationStatement {
private:
Expression* cond_;
int condition_position_;
- int continue_id_;
- int back_edge_id_;
+ const BailoutId continue_id_;
+ const BailoutId back_edge_id_;
};
@@ -768,14 +731,11 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- // Bailout support.
- virtual int ContinueId() const { return EntryId(); }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
WhileStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
cond_(NULL),
@@ -787,7 +747,7 @@ class WhileStatement: public IterationStatement {
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
- int body_id_;
+ const BailoutId body_id_;
};
@@ -816,18 +776,15 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- // Bailout support.
- virtual int ContinueId() const { return continue_id_; }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
+ virtual BailoutId ContinueId() const { return continue_id_; }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- template<class> friend class AstNodeFactory;
-
ForStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
init_(NULL),
@@ -846,8 +803,8 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- int continue_id_;
- int body_id_;
+ const BailoutId continue_id_;
+ const BailoutId body_id_;
};
@@ -864,14 +821,14 @@ class ForInStatement: public IterationStatement {
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
- virtual int ContinueId() const { return EntryId(); }
- virtual int StackCheckId() const { return body_id_; }
- int BodyId() const { return body_id_; }
- int PrepareId() const { return prepare_id_; }
+ virtual BailoutId ContinueId() const { return EntryId(); }
+ virtual BailoutId StackCheckId() const { return body_id_; }
+ BailoutId BodyId() const { return body_id_; }
+ BailoutId PrepareId() const { return prepare_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
each_(NULL),
@@ -883,8 +840,8 @@ class ForInStatement: public IterationStatement {
private:
Expression* each_;
Expression* enumerable_;
- int body_id_;
- int prepare_id_;
+ const BailoutId body_id_;
+ const BailoutId prepare_id_;
};
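The hunks above are instances of one recurring change in this diff: raw int AST ids become the typed wrappers BailoutId and TypeFeedbackId, so a bailout point can no longer be passed where a type-feedback slot is expected. A minimal sketch of such wrappers, assuming only what the diff itself uses (an explicit int constructor, ToInt(), and a reuse() helper converting one id kind into the other):

// Sketch only: V8's real BailoutId/TypeFeedbackId carry extra helpers,
// but these are the operations the hunks above visibly rely on.
class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  int ToInt() const { return id_; }
 private:
  int id_;
};

class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) {}
  int ToInt() const { return id_; }
 private:
  int id_;
};

// The reuse(id()) pattern: a type-feedback slot may share the integer of
// an existing bailout id, but the conversion is now explicit. (V8 keeps an
// equivalent helper on its AST classes; it is a free function here.)
inline TypeFeedbackId reuse(BailoutId id) { return TypeFeedbackId(id.ToInt()); }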
@@ -896,8 +853,6 @@ class ExpressionStatement: public Statement {
Expression* expression() const { return expression_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
@@ -913,8 +868,6 @@ class ContinueStatement: public Statement {
IterationStatement* target() const { return target_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ContinueStatement(IterationStatement* target)
: target_(target) { }
@@ -930,8 +883,6 @@ class BreakStatement: public Statement {
BreakableStatement* target() const { return target_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit BreakStatement(BreakableStatement* target)
: target_(target) { }
@@ -947,8 +898,6 @@ class ReturnStatement: public Statement {
Expression* expression() const { return expression_; }
protected:
- template<class> friend class AstNodeFactory;
-
explicit ReturnStatement(Expression* expression)
: expression_(expression) { }
@@ -965,8 +914,6 @@ class WithStatement: public Statement {
Statement* statement() const { return statement_; }
protected:
- template<class> friend class AstNodeFactory;
-
WithStatement(Expression* expression, Statement* statement)
: expression_(expression),
statement_(statement) { }
@@ -995,10 +942,10 @@ class CaseClause: public ZoneObject {
int position() const { return position_; }
void set_position(int pos) { position_ = pos; }
- int EntryId() { return entry_id_; }
- int CompareId() { return compare_id_; }
+ BailoutId EntryId() const { return entry_id_; }
// Type feedback information.
+ TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
@@ -1018,8 +965,8 @@ class CaseClause: public ZoneObject {
OBJECT_ONLY
};
CompareTypeFeedback compare_type_;
- int compare_id_;
- int entry_id_;
+ const TypeFeedbackId compare_id_;
+ const BailoutId entry_id_;
};
@@ -1036,8 +983,6 @@ class SwitchStatement: public BreakableStatement {
ZoneList<CaseClause*>* cases() const { return cases_; }
protected:
- template<class> friend class AstNodeFactory;
-
SwitchStatement(Isolate* isolate, ZoneStringList* labels)
: BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
tag_(NULL),
@@ -1065,13 +1010,11 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
- int IfId() const { return if_id_; }
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
+ BailoutId IfId() const { return if_id_; }
+ BailoutId ThenId() const { return then_id_; }
+ BailoutId ElseId() const { return else_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
IfStatement(Isolate* isolate,
Expression* condition,
Statement* then_statement,
@@ -1088,9 +1031,9 @@ class IfStatement: public Statement {
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
- int if_id_;
- int then_id_;
- int else_id_;
+ const BailoutId if_id_;
+ const BailoutId then_id_;
+ const BailoutId else_id_;
};
@@ -1098,15 +1041,16 @@ class IfStatement: public Statement {
// stack in the compiler; this should probably be reworked.
class TargetCollector: public AstNode {
public:
- TargetCollector() : targets_(0) { }
+ explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
  // Adds a jump target to the collector. The collector stores a pointer,
  // not a copy, of the target to make binding work, so make sure not to
  // pass in references to something on the stack.
- void AddTarget(Label* target);
+ void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+ virtual Type node_type() const { return kInvalid; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* targets() { return &targets_; }
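The AddTarget contract above is easy to violate: only the pointer is stored, so a Label must outlive the collector's use of it. A stand-alone sketch of the safe shape, with a toy arena standing in for V8's Zone:

struct Label { int pos; };

// Toy arena: allocations live until the arena is destroyed, which is the
// property the comment above depends on. (V8's Zone frees in bulk; this
// sketch simply leaks.)
struct Zone {
  template <class T> T* New() { return new T(); }
};

struct Collector {
  Label* targets[8];
  int count;
  Collector() : count(0) {}
  void AddTarget(Label* target) { targets[count++] = target; }  // pointer only
};

void SafeUse(Zone* zone, Collector* collector) {
  Label* target = zone->New<Label>();  // outlives this stack frame
  collector->AddTarget(target);        // storing &local here would dangle
}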
@@ -1150,8 +1094,6 @@ class TryCatchStatement: public TryStatement {
Block* catch_block() const { return catch_block_; }
protected:
- template<class> friend class AstNodeFactory;
-
TryCatchStatement(int index,
Block* try_block,
Scope* scope,
@@ -1177,8 +1119,6 @@ class TryFinallyStatement: public TryStatement {
Block* finally_block() const { return finally_block_; }
protected:
- template<class> friend class AstNodeFactory;
-
TryFinallyStatement(int index, Block* try_block, Block* finally_block)
: TryStatement(index, try_block),
finally_block_(finally_block) { }
@@ -1193,8 +1133,6 @@ class DebuggerStatement: public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- template<class> friend class AstNodeFactory;
-
DebuggerStatement() {}
};
@@ -1204,8 +1142,6 @@ class EmptyStatement: public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- template<class> friend class AstNodeFactory;
-
EmptyStatement() {}
};
@@ -1256,9 +1192,9 @@ class Literal: public Expression {
return s1->Equals(*s2);
}
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
+ protected:
Literal(Isolate* isolate, Handle<Object> handle)
: Expression(isolate),
handle_(handle) { }
@@ -1361,7 +1297,7 @@ class ObjectLiteral: public MaterializedLiteral {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- void CalculateEmitStore();
+ void CalculateEmitStore(Zone* zone);
enum Flags {
kNoFlags = 0,
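CalculateEmitStore implements the shadowing rule described in the comment above: when the same key occurs more than once in an object literal, only the last occurrence needs a store. A minimal stand-alone sketch of that rule, assuming a simple string-keyed property list rather than V8's Zone-allocated structures:

#include <string>
#include <unordered_set>
#include <vector>

struct Prop {
  std::string key;
  bool emit_store = true;  // cleared when a later duplicate key exists
};

// Walk backwards; the first occurrence seen (i.e. the last in source
// order) keeps its store, every earlier occurrence is shadowed.
void CalculateEmitStore(std::vector<Prop>* props) {
  std::unordered_set<std::string> seen;
  for (auto it = props->rbegin(); it != props->rend(); ++it) {
    if (!seen.insert(it->key).second) it->emit_store = false;
  }
}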
@@ -1376,8 +1312,6 @@ class ObjectLiteral: public MaterializedLiteral {
};
protected:
- template<class> friend class AstNodeFactory;
-
ObjectLiteral(Isolate* isolate,
Handle<FixedArray> constant_properties,
ZoneList<Property*>* properties,
@@ -1409,8 +1343,6 @@ class RegExpLiteral: public MaterializedLiteral {
Handle<String> flags() const { return flags_; }
protected:
- template<class> friend class AstNodeFactory;
-
RegExpLiteral(Isolate* isolate,
Handle<String> pattern,
Handle<String> flags,
@@ -1434,11 +1366,11 @@ class ArrayLiteral: public MaterializedLiteral {
ZoneList<Expression*>* values() const { return values_; }
// Return an AST id for an element that is used in simulate instructions.
- int GetIdForElement(int i) { return first_element_id_ + i; }
+ BailoutId GetIdForElement(int i) {
+ return BailoutId(first_element_id_.ToInt() + i);
+ }
protected:
- template<class> friend class AstNodeFactory;
-
ArrayLiteral(Isolate* isolate,
Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
@@ -1453,7 +1385,7 @@ class ArrayLiteral: public MaterializedLiteral {
private:
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
- int first_element_id_;
+ const BailoutId first_element_id_;
};
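GetIdForElement above builds BailoutId(first_element_id_.ToInt() + i), which is only sound if the literal reserved one consecutive id per element when it was created. A sketch of that reservation, with a hypothetical counter standing in for the per-isolate id source:

// Hypothetical stand-in for the per-isolate AST id counter.
struct IdSource {
  int next_id;
  explicit IdSource(int first) : next_id(first) {}
  // Reserve `count` consecutive ids and return the first of the range.
  int ReserveIdRange(int count) {
    int first = next_id;
    next_id += count;
    return first;
  }
};

// If first_element_id = ids.ReserveIdRange(values.length()), then element
// i's simulate id is simply first_element_id + i, as GetIdForElement does.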
@@ -1489,15 +1421,13 @@ class VariableProxy: public Expression {
void BindTo(Variable* var);
protected:
- template<class> friend class AstNodeFactory;
-
VariableProxy(Isolate* isolate, Variable* var);
VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
- int position,
- Interface* interface);
+ Interface* interface,
+ int position);
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
@@ -1521,20 +1451,21 @@ class Property: public Expression {
Expression* key() const { return key_; }
virtual int position() const { return pos_; }
+ BailoutId LoadId() const { return load_id_; }
+
bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
bool IsUninitialized() { return is_uninitialized_; }
+ TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
- template<class> friend class AstNodeFactory;
-
Property(Isolate* isolate,
Expression* obj,
Expression* key,
@@ -1543,6 +1474,7 @@ class Property: public Expression {
obj_(obj),
key_(key),
pos_(pos),
+ load_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
is_array_length_(false),
@@ -1554,6 +1486,7 @@ class Property: public Expression {
Expression* obj_;
Expression* key_;
int pos_;
+ const BailoutId load_id_;
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
@@ -1573,20 +1506,25 @@ class Call: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
virtual int position() const { return pos_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle,
- CallKind call_kind);
+ // Type feedback information.
+ TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
Handle<JSFunction> target() { return target_; }
+
+ // A cache for the holder, set as a side effect of computing the target of the
+ // call. Note that it contains the null handle when the receiver is the same
+ // as the holder!
Handle<JSObject> holder() { return holder_; }
+
Handle<JSGlobalPropertyCell> cell() { return cell_; }
bool ComputeTarget(Handle<Map> type, Handle<String> name);
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
- // Bailout support.
- int ReturnId() const { return return_id_; }
+ BailoutId ReturnId() const { return return_id_; }
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1594,8 +1532,6 @@ class Call: public Expression {
#endif
protected:
- template<class> friend class AstNodeFactory;
-
Call(Isolate* isolate,
Expression* expression,
ZoneList<Expression*>* arguments,
@@ -1620,7 +1556,7 @@ class Call: public Expression {
Handle<JSObject> holder_;
Handle<JSGlobalPropertyCell> cell_;
- int return_id_;
+ const BailoutId return_id_;
};
@@ -1632,16 +1568,15 @@ class CallNew: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
virtual int position() const { return pos_; }
+ // Type feedback information.
+ TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() { return target_; }
- // Bailout support.
- int ReturnId() const { return return_id_; }
+ BailoutId ReturnId() const { return return_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
CallNew(Isolate* isolate,
Expression* expression,
ZoneList<Expression*>* arguments,
@@ -1661,7 +1596,7 @@ class CallNew: public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
- int return_id_;
+ const BailoutId return_id_;
};
@@ -1678,9 +1613,9 @@ class CallRuntime: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
+ protected:
CallRuntime(Isolate* isolate,
Handle<String> name,
const Runtime::Function* function,
@@ -1707,12 +1642,12 @@ class UnaryOperation: public Expression {
Expression* expression() const { return expression_; }
virtual int position() const { return pos_; }
- int MaterializeTrueId() { return materialize_true_id_; }
- int MaterializeFalseId() { return materialize_false_id_; }
+ BailoutId MaterializeTrueId() { return materialize_true_id_; }
+ BailoutId MaterializeFalseId() { return materialize_false_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
+ protected:
UnaryOperation(Isolate* isolate,
Token::Value op,
Expression* expression,
@@ -1721,13 +1656,9 @@ class UnaryOperation: public Expression {
op_(op),
expression_(expression),
pos_(pos),
- materialize_true_id_(AstNode::kNoNumber),
- materialize_false_id_(AstNode::kNoNumber) {
+ materialize_true_id_(GetNextId(isolate)),
+ materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
- if (op == Token::NOT) {
- materialize_true_id_ = GetNextId(isolate);
- materialize_false_id_ = GetNextId(isolate);
- }
}
private:
@@ -1737,8 +1668,8 @@ class UnaryOperation: public Expression {
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
- int materialize_true_id_;
- int materialize_false_id_;
+ const BailoutId materialize_true_id_;
+ const BailoutId materialize_false_id_;
};
@@ -1753,22 +1684,23 @@ class BinaryOperation: public Expression {
Expression* right() const { return right_; }
virtual int position() const { return pos_; }
- // Bailout support.
- int RightId() const { return right_id_; }
+ BailoutId RightId() const { return right_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
+ protected:
BinaryOperation(Isolate* isolate,
Token::Value op,
Expression* left,
Expression* right,
int pos)
- : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
+ : Expression(isolate),
+ op_(op),
+ left_(left),
+ right_(right),
+ pos_(pos),
+ right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
- right_id_ = (op == Token::AND || op == Token::OR)
- ? GetNextId(isolate)
- : AstNode::kNoNumber;
}
private:
@@ -1776,9 +1708,9 @@ class BinaryOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
- // The short-circuit logical operations have an AST ID for their
+ // The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
- int right_id_;
+ const BailoutId right_id_;
};
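The constructor change above allocates right_id_ unconditionally, and the comment change ("have" becomes "need") records why the id exists at all: with a short-circuit operator, optimized code can resume either before the whole expression or just before the right operand, and the two states must be distinguishable. A toy model of the two resume points, with hypothetical names:

// Toy model, not V8 code: evaluating `a && b` has two states a bailout
// may need to restore, and the extra AST id names the second one.
enum ResumePoint {
  kBeforeExpression,  // the BinaryOperation's own id
  kBeforeRight        // RightId(): reached only when `a` was truthy
};

bool EvaluateAnd(bool a, bool (*eval_right)(), ResumePoint* resumed_at) {
  *resumed_at = kBeforeExpression;
  if (!a) return false;        // short-circuit: right side never evaluated
  *resumed_at = kBeforeRight;  // the state the extra id identifies
  return eval_right();
}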
@@ -1799,17 +1731,16 @@ class CountOperation: public Expression {
virtual void MarkAsStatement() { is_prefix_ = true; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- // Bailout support.
- int AssignmentId() const { return assignment_id_; }
- int CountId() const { return count_id_; }
+ BailoutId AssignmentId() const { return assignment_id_; }
- protected:
- template<class> friend class AstNodeFactory;
+ TypeFeedbackId CountBinOpFeedbackId() const { return count_id_; }
+ TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
+ protected:
CountOperation(Isolate* isolate,
Token::Value op,
bool is_prefix,
@@ -1829,8 +1760,8 @@ class CountOperation: public Expression {
bool is_monomorphic_;
Expression* expression_;
int pos_;
- int assignment_id_;
- int count_id_;
+ const BailoutId assignment_id_;
+ const TypeFeedbackId count_id_;
SmallMapList receiver_types_;
};
@@ -1845,6 +1776,7 @@ class CompareOperation: public Expression {
virtual int position() const { return pos_; }
// Type feedback information.
+ TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
@@ -1855,8 +1787,6 @@ class CompareOperation: public Expression {
bool IsLiteralCompareNull(Expression** expr);
protected:
- template<class> friend class AstNodeFactory;
-
CompareOperation(Isolate* isolate,
Token::Value op,
Expression* left,
@@ -1893,12 +1823,10 @@ class Conditional: public Expression {
int then_expression_position() const { return then_expression_position_; }
int else_expression_position() const { return else_expression_position_; }
- int ThenId() const { return then_id_; }
- int ElseId() const { return else_id_; }
+ BailoutId ThenId() const { return then_id_; }
+ BailoutId ElseId() const { return else_id_; }
protected:
- template<class> friend class AstNodeFactory;
-
Conditional(Isolate* isolate,
Expression* condition,
Expression* then_expression,
@@ -1920,8 +1848,8 @@ class Conditional: public Expression {
Expression* else_expression_;
int then_expression_position_;
int else_expression_position_;
- int then_id_;
- int else_id_;
+ const BailoutId then_id_;
+ const BailoutId else_id_;
};
@@ -1942,27 +1870,15 @@ class Assignment: public Expression {
// This check relies on the definition order of token in token.h.
bool is_compound() const { return op() > Token::ASSIGN; }
-  // An initialization block is a series of statements of the form
- // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
- // ending of these blocks to allow for optimizations of initialization
- // blocks.
- bool starts_initialization_block() { return block_start_; }
- bool ends_initialization_block() { return block_end_; }
- void mark_block_start() { block_start_ = true; }
- void mark_block_end() { block_end_ = true; }
+ BailoutId AssignmentId() const { return assignment_id_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- // Bailout support.
- int CompoundLoadId() const { return compound_load_id_; }
- int AssignmentId() const { return assignment_id_; }
-
protected:
- template<class> friend class AstNodeFactory;
-
Assignment(Isolate* isolate,
Token::Value op,
Expression* target,
@@ -1975,7 +1891,6 @@ class Assignment: public Expression {
if (is_compound()) {
binary_operation_ =
factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
- compound_load_id_ = GetNextId(isolate);
}
}
@@ -1985,11 +1900,7 @@ class Assignment: public Expression {
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
- int compound_load_id_;
- int assignment_id_;
-
- bool block_start_;
- bool block_end_;
+ const BailoutId assignment_id_;
bool is_monomorphic_;
SmallMapList receiver_types_;
@@ -2004,8 +1915,6 @@ class Throw: public Expression {
virtual int position() const { return pos_; }
protected:
- template<class> friend class AstNodeFactory;
-
Throw(Isolate* isolate, Expression* exception, int pos)
: Expression(isolate), exception_(exception), pos_(pos) {}
@@ -2033,6 +1942,11 @@ class FunctionLiteral: public Expression {
kIsFunction
};
+ enum IsParenthesizedFlag {
+ kIsParenthesized,
+ kNotParenthesized
+ };
+
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return name_; }
@@ -2062,6 +1976,7 @@ class FunctionLiteral: public Expression {
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
+ bool AllowsLazyCompilationWithoutContext();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
@@ -2082,6 +1997,18 @@ class FunctionLiteral: public Expression {
bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+  // This is used as a heuristic for when to eagerly compile a function
+ // literal. We consider the following constructs as hints that the
+ // function will be called immediately:
+ // - (function() { ... })();
+ // - var x = function() { ... }();
+ bool is_parenthesized() {
+ return IsParenthesized::decode(bitfield_) == kIsParenthesized;
+ }
+ void set_parenthesized() {
+ bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
+ }
+
int ast_node_count() { return ast_properties_.node_count(); }
AstProperties::Flags* flags() { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) {
@@ -2089,8 +2016,6 @@ class FunctionLiteral: public Expression {
}
protected:
- template<class> friend class AstNodeFactory;
-
FunctionLiteral(Isolate* isolate,
Handle<String> name,
Scope* scope,
@@ -2103,7 +2028,8 @@ class FunctionLiteral: public Expression {
int parameter_count,
Type type,
ParameterFlag has_duplicate_parameters,
- IsFunctionFlag is_function)
+ IsFunctionFlag is_function,
+ IsParenthesizedFlag is_parenthesized)
: Expression(isolate),
name_(name),
scope_(scope),
@@ -2122,7 +2048,8 @@ class FunctionLiteral: public Expression {
IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters) |
- IsFunction::encode(is_function);
+ IsFunction::encode(is_function) |
+ IsParenthesized::encode(is_parenthesized);
}
private:
@@ -2146,6 +2073,7 @@ class FunctionLiteral: public Expression {
class Pretenure: public BitField<bool, 3, 1> {};
class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
+ class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
};
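The new flag slots into the existing bitfield pattern: IsParenthesized occupies bit 6 of bitfield_, after Pretenure, HasDuplicateParameters and IsFunction. A self-contained sketch of how a BitField<T, shift, size> helper encodes, decodes and updates a value (V8's real template adds assertions and wider fields, but the bit arithmetic is the same):

#include <cstdio>

template <class T, int shift, int size>
class BitField {
 public:
  static const unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static unsigned update(unsigned field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

enum IsParenthesizedFlag { kIsParenthesized, kNotParenthesized };
typedef BitField<IsParenthesizedFlag, 6, 1> IsParenthesized;

int main() {
  unsigned bits = IsParenthesized::encode(kNotParenthesized);
  bits = IsParenthesized::update(bits, kIsParenthesized);  // set_parenthesized
  std::printf("%d\n", IsParenthesized::decode(bits) == kIsParenthesized);
}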
@@ -2158,8 +2086,6 @@ class SharedFunctionInfoLiteral: public Expression {
}
protected:
- template<class> friend class AstNodeFactory;
-
SharedFunctionInfoLiteral(
Isolate* isolate,
Handle<SharedFunctionInfo> shared_function_info)
@@ -2176,8 +2102,6 @@ class ThisFunction: public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- template<class> friend class AstNodeFactory;
-
explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
};
@@ -2213,8 +2137,8 @@ class RegExpTree: public ZoneObject {
// Returns the interval of registers used for captures within this
// expression.
virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text);
- SmartArrayPointer<const char> ToString();
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ SmartArrayPointer<const char> ToString(Zone* zone);
#define MAKE_ASTYPE(Name) \
virtual RegExp##Name* As##Name(); \
virtual bool Is##Name();
@@ -2299,7 +2223,7 @@ class CharacterSet BASE_EMBEDDED {
explicit CharacterSet(ZoneList<CharacterRange>* ranges)
: ranges_(ranges),
standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges();
+ ZoneList<CharacterRange>* ranges(Zone* zone);
uc16 standard_set_type() { return standard_set_type_; }
void set_standard_set_type(uc16 special_set_type) {
standard_set_type_ = special_set_type;
@@ -2330,11 +2254,11 @@ class RegExpCharacterClass: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return 1; }
virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
CharacterSet character_set() { return set_; }
  // TODO(lrn): Remove the need for the complex version of is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard();
+ bool is_standard(Zone* zone);
// Returns a value representing the standard character set if is_standard()
// returns true.
// Currently used values are:
@@ -2347,7 +2271,7 @@ class RegExpCharacterClass: public RegExpTree {
// . : non-unicode non-newline
// * : All characters
uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
+ ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
bool is_negated() { return is_negated_; }
private:
@@ -2367,7 +2291,7 @@ class RegExpAtom: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return data_.length(); }
virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2377,7 +2301,7 @@ class RegExpAtom: public RegExpTree {
class RegExpText: public RegExpTree {
public:
- RegExpText() : elements_(2), length_(0) {}
+ explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@@ -2386,9 +2310,9 @@ class RegExpText: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return length_; }
virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text);
- void AddElement(TextElement elm) {
- elements_.Add(elm);
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ void AddElement(TextElement elm, Zone* zone) {
+ elements_.Add(elm, zone);
length_ += elm.length();
}
ZoneList<TextElement>* elements() { return &elements_; }
@@ -2635,9 +2559,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Isolate* isolate)
+ AstNodeFactory(Isolate* isolate, Zone* zone)
: isolate_(isolate),
- zone_(isolate_->zone()) { }
+ zone_(zone) { }
Visitor* visitor() { return &visitor_; }
@@ -2696,12 +2620,12 @@ class AstNodeFactory BASE_EMBEDDED {
}
ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name);
+ ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
VISIT_AND_RETURN(ModulePath, module)
}
ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url);
+ ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
VISIT_AND_RETURN(ModuleUrl, module)
}
@@ -2709,7 +2633,7 @@ class AstNodeFactory BASE_EMBEDDED {
int capacity,
bool is_initializer_block) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block);
+ isolate_, labels, capacity, is_initializer_block, zone_);
VISIT_AND_RETURN(Block, block)
}
@@ -2842,11 +2766,10 @@ class AstNodeFactory BASE_EMBEDDED {
VariableProxy* NewVariableProxy(Handle<String> name,
bool is_this,
- int position = RelocInfo::kNoPosition,
- Interface* interface =
- Interface::NewValue()) {
+ Interface* interface = Interface::NewValue(),
+ int position = RelocInfo::kNoPosition) {
VariableProxy* proxy =
- new(zone_) VariableProxy(isolate_, name, is_this, position, interface);
+ new(zone_) VariableProxy(isolate_, name, is_this, interface, position);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -2950,12 +2873,14 @@ class AstNodeFactory BASE_EMBEDDED {
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::Type type,
- FunctionLiteral::IsFunctionFlag is_function) {
+ FunctionLiteral::IsFunctionFlag is_function,
+ FunctionLiteral::IsParenthesizedFlag is_parenthesized) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
has_only_simple_this_property_assignments, this_property_assignments,
- parameter_count, type, has_duplicate_parameters, is_function);
+ parameter_count, type, has_duplicate_parameters, is_function,
+ is_parenthesized);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
diff --git a/src/3rdparty/v8/src/atomicops.h b/src/3rdparty/v8/src/atomicops.h
index 55de87c..ec92ce6 100644
--- a/src/3rdparty/v8/src/atomicops.h
+++ b/src/3rdparty/v8/src/atomicops.h
@@ -69,7 +69,11 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
+#if defined(__OpenBSD__) && defined(__i386__)
+typedef Atomic32 AtomicWord;
+#else
typedef intptr_t AtomicWord;
+#endif
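The OpenBSD/i386 special case works because Atomic32 and intptr_t are both 32 bits wide there but are distinct C++ types; typedefing AtomicWord to Atomic32 lets it pick up the Atomic32 routines directly. The one property either branch must preserve is that AtomicWord stays machine-word sized, which a hypothetical check (not present in V8's header) would state as:

#include <stdint.h>

typedef int32_t Atomic32;
typedef intptr_t AtomicWord;  // the common branch, for this sketch

// Hypothetical compile-time check, not in V8's header.
static_assert(sizeof(AtomicWord) == sizeof(void*),
              "AtomicWord must be able to hold a pointer");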
// Atomically execute:
// result = *ptr;
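The comment cut off above is the standard atomicops contract; in V8's header it introduces NoBarrier_CompareAndSwap: return the value observed at ptr, and store new_value only if that value equaled old_value. A lock-based stand-in with the same observable behaviour (the real routines compile to single atomic instructions):

#include <mutex>

typedef int Atomic32;

static std::mutex g_mu;

// Same contract as the comment above describes: return the old value,
// and install new_value only when the old value matched.
Atomic32 CompareAndSwapSketch(Atomic32* ptr,
                              Atomic32 old_value,
                              Atomic32 new_value) {
  std::lock_guard<std::mutex> lock(g_mu);
  Atomic32 result = *ptr;
  if (result == old_value) *ptr = new_value;
  return result;
}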
@@ -148,7 +152,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// Include our platform specific implementation.
#if defined(_MSC_VER) && \
- (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+ (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64) || defined(_WIN32_WCE))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
diff --git a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h b/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
index fcf6a65..6677e64 100644
--- a/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
+++ b/src/3rdparty/v8/src/atomicops_internals_x86_msvc.h
@@ -69,10 +69,16 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
+// For Windows CE, no MemoryBarrier is needed.
+#ifdef _WIN32_WCE
+inline void MemoryBarrier() {
+}
+#else
inline void MemoryBarrier() {
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
}
+#endif
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
diff --git a/src/3rdparty/v8/src/bootstrapper.cc b/src/3rdparty/v8/src/bootstrapper.cc
index 36260ba..c06d73d 100644
--- a/src/3rdparty/v8/src/bootstrapper.cc
+++ b/src/3rdparty/v8/src/bootstrapper.cc
@@ -42,6 +42,7 @@
#include "snapshot.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
+#include "extensions/statistics-extension.h"
namespace v8 {
namespace internal {
@@ -95,6 +96,7 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
extensions_cache_.Initialize(create_heap_objects);
GCExtension::Register();
ExternalizeStringExtension::Register();
+ StatisticsExtension::Register();
}
@@ -154,7 +156,7 @@ class Genesis BASE_EMBEDDED {
Heap* heap() const { return isolate_->heap(); }
private:
- Handle<Context> global_context_;
+ Handle<Context> native_context_;
Isolate* isolate_;
  // There may be more than one active genesis object: this happens when
  // GC is processing callbacks which may create new environments.
// processing callbacks which may create new environments.
Genesis* previous_;
- Handle<Context> global_context() { return global_context_; }
+ Handle<Context> native_context() { return native_context_; }
// Creates some basic objects. Used for creating a context from scratch.
void CreateRoots();
@@ -226,13 +228,13 @@ class Genesis BASE_EMBEDDED {
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
- static bool InstallExtensions(Handle<Context> global_context,
+ static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
static bool InstallExtension(const char* name,
ExtensionStates* extension_states);
static bool InstallExtension(v8::RegisteredExtension* current,
ExtensionStates* extension_states);
- static void InstallSpecialObjects(Handle<Context> global_context);
+ static void InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
@@ -253,16 +255,16 @@ class Genesis BASE_EMBEDDED {
Handle<Map> CreateFunctionMap(PrototypePropertyMode prototype_mode);
- Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode);
+ void SetFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
Handle<Map> CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function);
- Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode propertyMode);
+ void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
+ PrototypePropertyMode propertyMode);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -317,7 +319,7 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Factory* factory = object->GetIsolate()->factory();
Handle<Map> old_to_map = Handle<Map>(object->map());
- Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMap(old_to_map);
new_to_map->set_prototype(*proto);
object->set_map(*new_to_map);
}
@@ -325,22 +327,20 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
Factory* factory = env->GetIsolate()->factory();
- JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
- SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- factory->null_value());
- env->set_global_proxy(env->global());
- env->global()->set_global_receiver(env->global());
+ Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
+ global_proxy->set_native_context(*factory->null_value());
+ SetObjectPrototype(global_proxy, factory->null_value());
+ env->set_global_proxy(env->global_object());
+ env->global_object()->set_global_receiver(env->global_object());
}
void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<Object> global_object) {
- ASSERT(global_object->IsJSGlobalProxy());
- Handle<JSGlobalProxy> global = Handle<JSGlobalProxy>::cast(global_object);
- env->global()->set_global_receiver(*global);
- env->set_global_proxy(*global);
- SetObjectPrototype(global, Handle<JSObject>(env->global()));
- global->set_context(*env);
+ Handle<JSGlobalProxy> global_proxy) {
+ env->global_object()->set_global_receiver(*global_proxy);
+ env->set_global_proxy(*global_proxy);
+ SetObjectPrototype(global_proxy, Handle<JSObject>(env->global_object()));
+ global_proxy->set_native_context(*env);
}
@@ -381,54 +381,54 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
}
-Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+ Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
+ Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- DescriptorArray::WhitenessWitness witness(*descriptors);
+ map->set_instance_descriptors(*descriptors);
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Set(0, &d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
- CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
- descriptors->Set(2, &d, witness);
+ CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
- CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
- descriptors->Set(3, &d, witness);
+ CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Set(4, &d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
- descriptors->Sort(witness);
- return descriptors;
}
Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
return map;
}
@@ -442,20 +442,20 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// writable.
Handle<Map> function_instance_map =
CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- global_context()->set_function_instance_map(*function_instance_map);
+ native_context()->set_function_instance_map(*function_instance_map);
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
CreateFunctionMap(DONT_ADD_PROTOTYPE);
- global_context()->set_function_without_prototype_map(
+ native_context()->set_function_without_prototype_map(
*function_without_prototype_map);
// Allocate the function map. This map is temporary, used only for processing
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- global_context()->set_function_map(*function_map);
+ native_context()->set_function_map(*function_map);
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
@@ -475,17 +475,15 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
- global_context()->set_object_function(*object_fun);
+ native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
Handle<JSObject> prototype = factory->NewJSObject(
isolate->object_function(),
TENURED);
- global_context()->set_initial_object_prototype(*prototype);
+ native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
- object_function_map->set_instance_descriptors(
- heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -509,63 +507,63 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
- global_context()->function_map()->set_prototype(*empty_function);
- global_context()->function_instance_map()->set_prototype(*empty_function);
- global_context()->function_without_prototype_map()->
+ native_context()->function_map()->set_prototype(*empty_function);
+ native_context()->function_instance_map()->set_prototype(*empty_function);
+ native_context()->function_without_prototype_map()->
set_prototype(*empty_function);
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
empty_function_map->set_prototype(
- global_context()->object_function()->prototype());
+ native_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
return empty_function;
}
-Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
- PrototypePropertyMode prototypeMode) {
+void Genesis::SetStrictFunctionInstanceDescriptor(
+ Handle<Map> map, PrototypePropertyMode prototypeMode) {
int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
- Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+ Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(0, size));
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+
+ Handle<Foreign> length(factory()->NewForeign(&Accessors::FunctionLength));
+ Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
+ Handle<AccessorPair> arguments(factory()->NewAccessorPair());
+ Handle<AccessorPair> caller(factory()->NewAccessorPair());
+ Handle<Foreign> prototype;
+ if (prototypeMode != DONT_ADD_PROTOTYPE) {
+ prototype = factory()->NewForeign(&Accessors::FunctionPrototype);
+ }
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
-
- DescriptorArray::WhitenessWitness witness(*descriptors);
+ map->set_instance_descriptors(*descriptors);
{ // Add length.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
- CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
- descriptors->Set(0, &d, witness);
+ CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add name.
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
- CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- Handle<AccessorPair> arguments(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
- descriptors->Set(2, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- Handle<AccessorPair> caller(factory()->NewAccessorPair());
CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
- descriptors->Set(3, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
-
if (prototypeMode != DONT_ADD_PROTOTYPE) {
// Add prototype.
if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
}
- Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
- CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
- descriptors->Set(4, &d, witness);
+ CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ map->AppendDescriptor(&d, witness);
}
-
- descriptors->Sort(witness);
- return descriptors;
}
@@ -578,7 +576,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
throw_type_error_function->set_map(
- global_context()->function_map());
+ native_context()->function_map());
throw_type_error_function->set_code(*code);
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
@@ -593,9 +591,7 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- Handle<DescriptorArray> descriptors =
- ComputeStrictFunctionInstanceDescriptor(prototype_mode);
- map->set_instance_descriptors(*descriptors);
+ SetStrictFunctionInstanceDescriptor(map, prototype_mode);
map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
map->set_prototype(*empty_function);
return map;
@@ -606,13 +602,13 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the strict mode function instances.
Handle<Map> strict_mode_function_instance_map =
CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_instance_map(
+ native_context()->set_strict_mode_function_instance_map(
*strict_mode_function_instance_map);
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_mode_function_without_prototype_map =
CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_without_prototype_map(
+ native_context()->set_strict_mode_function_without_prototype_map(
*strict_mode_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
@@ -620,7 +616,7 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> strict_mode_function_map =
CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- global_context()->set_strict_mode_function_map(
+ native_context()->set_strict_mode_function_map(
*strict_mode_function_map);
// The final map for the strict mode functions. Writeable prototype.
@@ -641,7 +637,7 @@ static void SetAccessors(Handle<Map> map,
Handle<String> name,
Handle<JSFunction> func) {
DescriptorArray* descs = map->instance_descriptors();
- int number = descs->Search(*name);
+ int number = descs->SearchWithCache(*name, *map);
AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
accessors->set_getter(*func);
accessors->set_setter(*func);
@@ -654,39 +650,39 @@ void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
}
-static void AddToWeakGlobalContextList(Context* context) {
- ASSERT(context->IsGlobalContext());
+static void AddToWeakNativeContextList(Context* context) {
+ ASSERT(context->IsNativeContext());
Heap* heap = context->GetIsolate()->heap();
#ifdef DEBUG
{ // NOLINT
ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
// Check that context is not in the list yet.
- for (Object* current = heap->global_contexts_list();
+ for (Object* current = heap->native_contexts_list();
!current->IsUndefined();
current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
ASSERT(current != context);
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->global_contexts_list());
- heap->set_global_contexts_list(context);
+ context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list());
+ heap->set_native_contexts_list(context);
}
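AddToWeakNativeContextList is a plain head insertion into an intrusive singly linked list: the new context's NEXT_CONTEXT_LINK slot points at the old head, and the heap's list head is swung to the new context. The same shape in miniature, with ordinary pointers standing in for weak heap slots:

struct Context {
  Context* next_context_link = nullptr;  // stands in for NEXT_CONTEXT_LINK
};

struct Heap {
  Context* native_contexts_list = nullptr;

  void AddToWeakNativeContextList(Context* context) {
    context->next_context_link = native_contexts_list;  // link to old head
    native_contexts_list = context;                     // new head
  }
};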
void Genesis::CreateRoots() {
- // Allocate the global context FixedArray first and then patch the
+ // Allocate the native context FixedArray first and then patch the
// closure and extension object later (we need the empty function
// and the global object, but in order to create those, we need the
- // global context).
- global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
- *factory()->NewGlobalContext()));
- AddToWeakGlobalContextList(*global_context_);
- isolate()->set_context(*global_context());
+ // native context).
+ native_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+ *factory()->NewNativeContext()));
+ AddToWeakNativeContextList(*native_context_);
+ isolate()->set_context(*native_context());
// Allocate the message listeners object.
{
v8::NeanderArray listeners;
- global_context()->set_message_listeners(*listeners.value());
+ native_context()->set_message_listeners(*listeners.value());
}
}
@@ -749,6 +745,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
}
js_global_function->initial_map()->set_is_hidden_prototype();
+ js_global_function->initial_map()->set_dictionary_map(true);
Handle<GlobalObject> inner_global =
factory()->NewGlobalObject(js_global_function);
if (inner_global_out != NULL) {
@@ -795,22 +792,23 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
Handle<JSGlobalProxy> global_proxy) {
- // Set the global context for the global object.
- inner_global->set_global_context(*global_context());
+ // Set the native context for the global object.
+ inner_global->set_native_context(*native_context());
+ inner_global->set_global_context(*native_context());
inner_global->set_global_receiver(*global_proxy);
- global_proxy->set_context(*global_context());
- global_context()->set_global_proxy(*global_proxy);
+ global_proxy->set_native_context(*native_context());
+ native_context()->set_global_proxy(*global_proxy);
}
void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
Handle<GlobalObject> inner_global_from_snapshot(
- GlobalObject::cast(global_context_->extension()));
- Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
- global_context_->set_extension(*inner_global);
- global_context_->set_global(*inner_global);
- global_context_->set_qml_global(*inner_global);
- global_context_->set_security_token(*inner_global);
+ GlobalObject::cast(native_context_->extension()));
+ Handle<JSBuiltinsObject> builtins_global(native_context_->builtins());
+ native_context_->set_extension(*inner_global);
+ native_context_->set_global_object(*inner_global);
+ native_context_->set_qml_global_object(*inner_global);
+ native_context_->set_security_token(*inner_global);
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
@@ -830,17 +828,17 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
- global_context()->set_closure(*empty_function);
- global_context()->set_previous(NULL);
+ native_context()->set_closure(*empty_function);
+ native_context()->set_previous(NULL);
// Set extension and global object.
- global_context()->set_extension(*inner_global);
- global_context()->set_global(*inner_global);
- global_context()->set_qml_global(*inner_global);
+ native_context()->set_extension(*inner_global);
+ native_context()->set_global_object(*inner_global);
+ native_context()->set_qml_global_object(*inner_global);
// Security setup: Set the security token of the global object to
  // the inner global. This makes the security check between two
// different contexts fail by default even in case of global
// object reinitialization.
- global_context()->set_security_token(*inner_global);
+ native_context()->set_security_token(*inner_global);
Isolate* isolate = inner_global->GetIsolate();
Factory* factory = isolate->factory();
@@ -852,7 +850,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
inner_global, object_name,
isolate->object_function(), DONT_ENUM));
- Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
// Install global Function object
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
@@ -870,19 +868,27 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
- Handle<DescriptorArray> array_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
+
+ Handle<Map> initial_map(array_function->initial_map());
+ Handle<DescriptorArray> array_descriptors(
+ factory->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ initial_map->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
  // array_function is used internally. JS code creating an array object should
// search for the 'Array' property on the global object and use that one
// as the constructor. 'Array' property on a global object can be
// overwritten by JS code.
- global_context()->set_array_function(*array_function);
- array_function->initial_map()->set_instance_descriptors(*array_descriptors);
+ native_context()->set_array_function(*array_function);
}
{ // --- N u m b e r ---
@@ -890,7 +896,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_number_function(*number_fun);
+ native_context()->set_number_function(*number_fun);
}
{ // --- B o o l e a n ---
@@ -898,7 +904,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_boolean_function(*boolean_fun);
+ native_context()->set_boolean_function(*boolean_fun);
}
{ // --- S t r i n g ---
@@ -908,20 +914,24 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Builtins::kIllegal, true);
string_fun->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kStringConstructCode));
- global_context()->set_string_function(*string_fun);
- // Add 'length' property to strings.
- Handle<DescriptorArray> string_descriptors =
- factory->CopyAppendForeignDescriptor(
- factory->empty_descriptor_array(),
- factory->length_symbol(),
- factory->NewForeign(&Accessors::StringLength),
- static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE |
- READ_ONLY));
+ native_context()->set_string_function(*string_fun);
Handle<Map> string_map =
- Handle<Map>(global_context()->string_function()->initial_map());
+ Handle<Map>(native_context()->string_function()->initial_map());
+ Handle<DescriptorArray> string_descriptors(
+ factory->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*string_descriptors);
+
+ Handle<Foreign> string_length(
+ factory->NewForeign(&Accessors::StringLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE | READ_ONLY);
string_map->set_instance_descriptors(*string_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
+ string_map->AppendDescriptor(&d, witness);
+ }
}
{ // --- D a t e ---
@@ -931,7 +941,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_date_function(*date_fun);
+ native_context()->set_date_function(*date_fun);
}
@@ -941,49 +951,46 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);
- global_context()->set_regexp_function(*regexp_fun);
+ native_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
- DescriptorArray::WhitenessWitness witness(*descriptors);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- int enum_index = 0;
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ initial_map->set_instance_descriptors(*descriptors);
+
{
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_symbol(),
JSRegExp::kSourceFieldIndex,
- final,
- enum_index++);
- descriptors->Set(0, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_symbol(),
JSRegExp::kGlobalFieldIndex,
- final,
- enum_index++);
- descriptors->Set(1, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
- final,
- enum_index++);
- descriptors->Set(2, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
- final,
- enum_index++);
- descriptors->Set(3, &field, witness);
+ final);
+ initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.5.
@@ -991,24 +998,20 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
- writable,
- enum_index++);
- descriptors->Set(4, &field, witness);
+ writable);
+ initial_map->AppendDescriptor(&field, witness);
}
- descriptors->SetNextEnumerationIndex(enum_index);
- descriptors->Sort(witness);
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
- initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
- proto_map->set_prototype(global_context()->initial_object_prototype());
+ Handle<Map> proto_map = factory->CopyMap(initial_map);
+ proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->query_colon_symbol());
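
Because all five RegExp fields are in-object, reading one back is a plain indexed load. A hedged sketch using the same accessor family this hunk already uses for writes:

    // In-object fields are addressed by index, mirroring the puts above.
    Object* source = proto->InObjectPropertyAt(JSRegExp::kSourceFieldIndex);
    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
                                 heap->false_value());
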
@@ -1032,7 +1035,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
{ MaybeObject* result = cons->SetInstancePrototype(
- global_context()->initial_object_prototype());
+ native_context()->initial_object_prototype());
if (result->IsFailure()) return false;
}
cons->SetInstanceClassName(*name);
@@ -1041,7 +1044,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, name, json_object, DONT_ENUM));
- global_context()->set_json_object(*json_object);
+ native_context()->set_json_object(*json_object);
}
{ // --- arguments_boilerplate_
@@ -1053,7 +1056,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
Handle<JSObject>(
- JSObject::cast(global_context()->object_function()->prototype()));
+ JSObject::cast(native_context()->object_function()->prototype()));
Handle<JSFunction> function =
factory->NewFunctionWithPrototype(symbol,
@@ -1067,7 +1070,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
- global_context()->set_arguments_boilerplate(*result);
+ native_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1082,11 +1085,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
@@ -1094,7 +1097,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastElements());
+ ASSERT(result->HasFastObjectElements());
#endif
}
@@ -1108,8 +1111,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
- Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+ Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
+ Handle<Map> new_map = factory->CopyMap(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
@@ -1117,7 +1120,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
ASSERT(result->HasNonStrictArgumentsElements());
- global_context()->set_aliased_arguments_boilerplate(*result);
+ native_context()->set_aliased_arguments_boilerplate(*result);
}
{ // --- strict mode arguments boilerplate
@@ -1137,39 +1140,43 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
caller->set_getter(*throw_function);
caller->set_setter(*throw_function);
+ // Create the map. Allocate one in-object field for length.
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
+ Heap::kArgumentsObjectSizeStrict);
// Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*descriptors);
+ map->set_instance_descriptors(*descriptors);
+
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
- descriptors->Set(0, &d, witness);
+ map->AppendDescriptor(&d, witness);
}
{ // callee
- CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
- descriptors->Set(1, &d, witness);
+ CallbacksDescriptor d(*factory->callee_symbol(),
+ *callee,
+ attributes);
+ map->AppendDescriptor(&d, witness);
}
{ // caller
- CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
- descriptors->Set(2, &d, witness);
+ CallbacksDescriptor d(*factory->caller_symbol(),
+ *caller,
+ attributes);
+ map->AppendDescriptor(&d, witness);
}
- descriptors->Sort(witness);
- // Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
- map->set_instance_descriptors(*descriptors);
map->set_function_with_prototype(true);
- map->set_prototype(global_context()->object_function()->prototype());
+ map->set_prototype(native_context()->object_function()->prototype());
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
// Copy constructor from the non-strict arguments boilerplate.
map->set_constructor(
- global_context()->arguments_boilerplate()->map()->constructor());
+ native_context()->arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- global_context()->set_strict_mode_arguments_boilerplate(*result);
+ native_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1180,14 +1187,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
// Check the state of the object.
ASSERT(result->HasFastProperties());
- ASSERT(result->HasFastElements());
+ ASSERT(result->HasFastObjectElements());
#endif
}
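
For context, the caller AccessorPair set up earlier in this hunk points both getter and setter at the same thrower, so strict-mode arguments.caller and arguments.callee fail symmetrically on read and write. A sketch of that poisoning, assuming throw_function is the type-error thrower created just above the shown lines:

    // Reads and writes both land on the thrower.
    Handle<AccessorPair> callee = factory->NewAccessorPair();
    callee->set_getter(*throw_function);
    callee->set_setter(*throw_function);
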
@@ -1204,7 +1211,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<String> name = factory->LookupAsciiSymbol("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
- global_context()->set_context_extension_function(*context_extension_fun);
+ native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1216,7 +1223,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> delegate =
factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_function_delegate(*delegate);
+ native_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
@@ -1228,21 +1235,21 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> delegate =
factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
- global_context()->set_call_as_constructor_delegate(*delegate);
+ native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
// Initialize the out of memory slot.
- global_context()->set_out_of_memory(heap->false_value());
+ native_context()->set_out_of_memory(heap->false_value());
// Initialize the data slot.
- global_context()->set_data(heap->undefined_value());
+ native_context()->set_data(heap->undefined_value());
{
// Initialize the random seed slot.
Handle<ByteArray> zeroed_byte_array(
factory->NewByteArray(kRandomStateSize));
- global_context()->set_random_seed(*zeroed_byte_array);
+ native_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
return true;
@@ -1250,7 +1257,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
void Genesis::InitializeExperimentalGlobal() {
- Handle<JSObject> global = Handle<JSObject>(global_context()->global());
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
// TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
// longer need to live behind a flag, so functions get added to the snapshot.
@@ -1342,6 +1349,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
script_name,
0,
0,
+ top_context,
extension,
NULL,
Handle<String>::null(),
@@ -1353,7 +1361,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// Set up the function context. Conceptually, we should clone the
// function before overwriting the context, but since we're in a
// single-threaded environment it is not strictly necessary.
- ASSERT(top_context->IsGlobalContext());
+ ASSERT(top_context->IsNativeContext());
Handle<Context> context =
Handle<Context>(use_runtime_context
? Handle<Context>(top_context->runtime_context())
@@ -1366,7 +1374,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> receiver =
Handle<Object>(use_runtime_context
? top_context->builtins()
- : top_context->global());
+ : top_context->global_object());
bool has_pending_exception;
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
@@ -1377,9 +1385,9 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
#define INSTALL_NATIVE(Type, name, var) \
Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \
Object* var##_native = \
- global_context()->builtins()->GetPropertyNoExceptionThrown( \
+ native_context()->builtins()->GetPropertyNoExceptionThrown( \
*var##_name); \
- global_context()->set_##var(Type::cast(var##_native));
+ native_context()->set_##var(Type::cast(var##_native));
void Genesis::InstallNativeFunctions() {
@@ -1409,6 +1417,11 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
+ if (FLAG_harmony_observation) {
+ INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
+ observers_deliver_changes);
+ }
}
#undef INSTALL_NATIVE
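
To make the macro concrete, one of the new harmony-observation installs expands roughly as follows (modulo line breaks):

    // INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change)
    Handle<String> observers_notify_change_name =
        factory()->LookupAsciiSymbol("NotifyChange");
    Object* observers_notify_change_native =
        native_context()->builtins()->GetPropertyNoExceptionThrown(
            *observers_notify_change_name);
    native_context()->set_observers_notify_change(
        JSFunction::cast(observers_notify_change_native));
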
@@ -1419,7 +1432,7 @@ bool Genesis::InstallNatives() {
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the global_context directly in the object.
+ // (itself) and a reference to the native_context directly in the object.
Handle<Code> code = Handle<Code>(
isolate()->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> builtins_fun =
@@ -1429,12 +1442,15 @@ bool Genesis::InstallNatives() {
Handle<String> name = factory()->LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
+ builtins_fun->initial_map()->set_dictionary_map(true);
+ builtins_fun->initial_map()->set_prototype(heap()->null_value());
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
- builtins->set_global_context(*global_context());
+ builtins->set_native_context(*native_context());
+ builtins->set_global_context(*native_context());
builtins->set_global_receiver(*builtins);
// Set up the 'global' properties of the builtins object. The
@@ -1444,26 +1460,27 @@ bool Genesis::InstallNatives() {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
- Handle<Object> global_obj(global_context()->global());
+ Handle<Object> global_obj(native_context()->global_object());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
builtins, global_symbol, global_obj, attributes));
// Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
+ JSGlobalObject::cast(native_context()->global_object())->
+ set_builtins(*builtins);
- // Create a bridge function that has context in the global context.
+ // Create a bridge function that has context in the native context.
Handle<JSFunction> bridge =
factory()->NewFunction(factory()->empty_symbol(),
factory()->undefined_value());
- ASSERT(bridge->context() == *isolate()->global_context());
+ ASSERT(bridge->context() == *isolate()->native_context());
// Allocate the builtins context.
Handle<Context> context =
factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global(*builtins); // override builtins global object
+ context->set_global_object(*builtins); // override builtins global object
- global_context()->set_runtime_context(*context);
+ native_context()->set_runtime_context(*context);
{ // -- S c r i p t
// Builtin functions for Script.
@@ -1474,118 +1491,134 @@ bool Genesis::InstallNatives() {
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(script_fun, prototype);
- global_context()->set_script_function(*script_fun);
-
- // Add 'source' and 'data' property to scripts.
- PropertyAttributes common_attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Handle<Foreign> foreign_source =
- factory()->NewForeign(&Accessors::ScriptSource);
- Handle<DescriptorArray> script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->LookupAsciiSymbol("source"),
- foreign_source,
- common_attributes);
- Handle<Foreign> foreign_name =
- factory()->NewForeign(&Accessors::ScriptName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("name"),
- foreign_name,
- common_attributes);
- Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("id"),
- foreign_id,
- common_attributes);
- Handle<Foreign> foreign_line_offset =
- factory()->NewForeign(&Accessors::ScriptLineOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_offset"),
- foreign_line_offset,
- common_attributes);
- Handle<Foreign> foreign_column_offset =
- factory()->NewForeign(&Accessors::ScriptColumnOffset);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("column_offset"),
- foreign_column_offset,
- common_attributes);
- Handle<Foreign> foreign_data =
- factory()->NewForeign(&Accessors::ScriptData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("data"),
- foreign_data,
- common_attributes);
- Handle<Foreign> foreign_type =
- factory()->NewForeign(&Accessors::ScriptType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("type"),
- foreign_type,
- common_attributes);
- Handle<Foreign> foreign_compilation_type =
- factory()->NewForeign(&Accessors::ScriptCompilationType);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("compilation_type"),
- foreign_compilation_type,
- common_attributes);
- Handle<Foreign> foreign_line_ends =
- factory()->NewForeign(&Accessors::ScriptLineEnds);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("line_ends"),
- foreign_line_ends,
- common_attributes);
- Handle<Foreign> foreign_context_data =
- factory()->NewForeign(&Accessors::ScriptContextData);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("context_data"),
- foreign_context_data,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script =
- factory()->NewForeign(&Accessors::ScriptEvalFromScript);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script"),
- foreign_eval_from_script,
- common_attributes);
- Handle<Foreign> foreign_eval_from_script_position =
- factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_script_position"),
- foreign_eval_from_script_position,
- common_attributes);
- Handle<Foreign> foreign_eval_from_function_name =
- factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
- script_descriptors =
- factory()->CopyAppendForeignDescriptor(
- script_descriptors,
- factory()->LookupAsciiSymbol("eval_from_function_name"),
- foreign_eval_from_function_name,
- common_attributes);
+ native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+
+ Handle<DescriptorArray> script_descriptors(
+ factory()->NewDescriptorArray(0, 13));
+ DescriptorArray::WhitenessWitness witness(*script_descriptors);
+
+ Handle<Foreign> script_source(
+ factory()->NewForeign(&Accessors::ScriptSource));
+ Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
+ Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+ Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
+ Handle<String> line_offset_symbol(
+ factory()->LookupAsciiSymbol("line_offset"));
+ Handle<Foreign> script_line_offset(
+ factory()->NewForeign(&Accessors::ScriptLineOffset));
+ Handle<String> column_offset_symbol(
+ factory()->LookupAsciiSymbol("column_offset"));
+ Handle<Foreign> script_column_offset(
+ factory()->NewForeign(&Accessors::ScriptColumnOffset));
+ Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+ Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
+ Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+ Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
+ Handle<String> compilation_type_symbol(
+ factory()->LookupAsciiSymbol("compilation_type"));
+ Handle<Foreign> script_compilation_type(
+ factory()->NewForeign(&Accessors::ScriptCompilationType));
+ Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+ Handle<Foreign> script_line_ends(
+ factory()->NewForeign(&Accessors::ScriptLineEnds));
+ Handle<String> context_data_symbol(
+ factory()->LookupAsciiSymbol("context_data"));
+ Handle<Foreign> script_context_data(
+ factory()->NewForeign(&Accessors::ScriptContextData));
+ Handle<String> eval_from_script_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script"));
+ Handle<Foreign> script_eval_from_script(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScript));
+ Handle<String> eval_from_script_position_symbol(
+ factory()->LookupAsciiSymbol("eval_from_script_position"));
+ Handle<Foreign> script_eval_from_script_position(
+ factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
+ Handle<String> eval_from_function_name_symbol(
+ factory()->LookupAsciiSymbol("eval_from_function_name"));
+ Handle<Foreign> script_eval_from_function_name(
+ factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
script_map->set_instance_descriptors(*script_descriptors);
+ {
+ CallbacksDescriptor d(
+ *factory()->source_symbol(), *script_source, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*id_symbol, *script_id, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *column_offset_symbol, *script_column_offset, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*data_symbol, *script_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*type_symbol, *script_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *compilation_type_symbol, *script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *context_data_symbol, *script_context_data, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_symbol, *script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_script_position_symbol,
+ *script_eval_from_script_position,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
+ {
+ CallbacksDescriptor d(
+ *eval_from_function_name_symbol,
+ *script_eval_from_function_name,
+ attribs);
+ script_map->AppendDescriptor(&d, witness);
+ }
+
// Allocate the empty script.
Handle<Script> script = factory()->NewScript(factory()->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -1603,7 +1636,7 @@ bool Genesis::InstallNatives() {
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
- global_context()->set_opaque_reference_function(*opaque_reference_fun);
+ native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
{ // --- I n t e r n a l A r r a y ---
@@ -1633,25 +1666,31 @@ bool Genesis::InstallNatives() {
// elements in InternalArrays can be set to non-Smi values without going
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENTS
// transition easy to trap. Moreover, they rarely are smi-only.
- MaybeObject* maybe_map =
- array_function->initial_map()->CopyDropTransitions();
+ MaybeObject* maybe_map = array_function->initial_map()->Copy();
Map* new_map;
- if (!maybe_map->To<Map>(&new_map)) return false;
- new_map->set_elements_kind(FAST_ELEMENTS);
+ if (!maybe_map->To(&new_map)) return false;
+ new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
- Handle<DescriptorArray> array_descriptors =
- factory()->CopyAppendForeignDescriptor(
- factory()->empty_descriptor_array(),
- factory()->length_symbol(),
- factory()->NewForeign(&Accessors::ArrayLength),
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
-
- array_function->initial_map()->set_instance_descriptors(
- *array_descriptors);
+ Handle<Map> initial_map(array_function->initial_map());
+ Handle<DescriptorArray> array_descriptors(
+ factory()->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory()->NewForeign(
+ &Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ initial_map->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(
+ *factory()->length_symbol(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
- global_context()->set_internal_array_function(*array_function);
+ native_context()->set_internal_array_function(*array_function);
}
if (FLAG_disable_native_files) {
@@ -1674,16 +1713,16 @@ bool Genesis::InstallNatives() {
// Store the map for the string prototype after the natives have been compiled
// and the String function has been set up.
- Handle<JSFunction> string_function(global_context()->string_function());
+ Handle<JSFunction> string_function(native_context()->string_function());
ASSERT(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
- global_context()->set_string_function_prototype_map(
+ native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
// Install Function.prototype.call and apply.
{ Handle<String> key = factory()->function_class_symbol();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
+ Handle<JSFunction>::cast(GetProperty(isolate()->global_object(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
@@ -1721,7 +1760,7 @@ bool Genesis::InstallNatives() {
// RegExpResult initial map.
// Find global.Array.prototype to inherit from.
- Handle<JSFunction> array_constructor(global_context()->array_function());
+ Handle<JSFunction> array_constructor(native_context()->array_function());
Handle<JSObject> array_prototype(
JSObject::cast(array_constructor->instance_prototype()));
@@ -1736,44 +1775,45 @@ bool Genesis::InstallNatives() {
// Update map with length accessor from Array and add "index" and "input".
Handle<DescriptorArray> reresult_descriptors =
- factory()->NewDescriptorArray(3);
+ factory()->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
+ initial_map->set_instance_descriptors(*reresult_descriptors);
- JSFunction* array_function = global_context()->array_function();
- Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors());
- int index = array_descriptors->SearchWithCache(heap()->length_symbol());
- MaybeObject* copy_result =
- reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
- if (copy_result->IsFailure()) return false;
-
- int enum_index = 0;
+ {
+ JSFunction* array_function = native_context()->array_function();
+ Handle<DescriptorArray> array_descriptors(
+ array_function->initial_map()->instance_descriptors());
+ String* length = heap()->length_symbol();
+ int old = array_descriptors->SearchWithCache(
+ length, array_function->initial_map());
+ ASSERT(old != DescriptorArray::kNotFound);
+ CallbacksDescriptor desc(length,
+ array_descriptors->GetValue(old),
+ array_descriptors->GetDetails(old).attributes());
+ initial_map->AppendDescriptor(&desc, witness);
+ }
{
FieldDescriptor index_field(heap()->index_symbol(),
JSRegExpResult::kIndexIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(1, &index_field, witness);
+ NONE);
+ initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_symbol(),
JSRegExpResult::kInputIndex,
- NONE,
- enum_index++);
- reresult_descriptors->Set(2, &input_field, witness);
+ NONE);
+ initial_map->AppendDescriptor(&input_field, witness);
}
- reresult_descriptors->Sort(witness);
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
initial_map->set_unused_property_fields(0);
- initial_map->set_instance_descriptors(*reresult_descriptors);
- global_context()->set_regexp_result_map(*initial_map);
+ native_context()->set_regexp_result_map(*initial_map);
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
builtins->Verify();
#endif
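
The interesting move above is that the RegExpResult map borrows Array's magic length accessor: it is looked up by name in Array's descriptor array and re-appended, while index and input are added as ordinary in-object fields. The lookup step in isolation, restated from the hunk (map stands in for array_function->initial_map()):

    DescriptorArray* descs = map->instance_descriptors();
    int entry = descs->SearchWithCache(heap()->length_symbol(), map);
    ASSERT(entry != DescriptorArray::kNotFound);
    Object* accessor = descs->GetValue(entry);
    PropertyAttributes attrs = descs->GetDetails(entry).attributes();
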
@@ -1795,6 +1835,11 @@ bool Genesis::InstallExperimentalNatives() {
"native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_observation &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native object-observe.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
@@ -1804,10 +1849,10 @@ bool Genesis::InstallExperimentalNatives() {
static Handle<JSObject> ResolveBuiltinIdHolder(
- Handle<Context> global_context,
+ Handle<Context> native_context,
const char* holder_expr) {
- Factory* factory = global_context->GetIsolate()->factory();
- Handle<GlobalObject> global(global_context->global());
+ Factory* factory = native_context->GetIsolate()->factory();
+ Handle<GlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
@@ -1838,7 +1883,7 @@ void Genesis::InstallBuiltinFunctionIds() {
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
- global_context(), #holder_expr); \
+ native_context(), #holder_expr); \
BuiltinFunctionId id = k##name; \
InstallBuiltinFunctionId(holder, #fun_name, id); \
}
@@ -1850,7 +1895,7 @@ void Genesis::InstallBuiltinFunctionIds() {
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
- F(16, global_context()->regexp_function())
+ F(16, native_context()->regexp_function())
static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
@@ -1886,34 +1931,35 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
- global_context()->set_jsfunction_result_caches(*caches);
+ native_context()->set_jsfunction_result_caches(*caches);
}
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
- global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+ native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
-bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
+bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = global_context->GetIsolate();
+ Isolate* isolate = native_context->GetIsolate();
BootstrapperActive active;
SaveContext saved_context(isolate);
- isolate->set_context(*global_context);
- if (!Genesis::InstallExtensions(global_context, extensions)) return false;
- Genesis::InstallSpecialObjects(global_context);
+ isolate->set_context(*native_context);
+ if (!Genesis::InstallExtensions(native_context, extensions)) return false;
+ Genesis::InstallSpecialObjects(native_context);
return true;
}
-void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
- Isolate* isolate = global_context->GetIsolate();
+void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
+ Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope;
- Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(
+ native_context->global_object()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
@@ -1942,10 +1988,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
// debugger but without tanking the whole context.
if (!debug->Load()) return;
// Set the security token for the debugger context to the same as
- // the shell global context to allow calling between these (otherwise
+ // the shell native context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
debug->debug_context()->set_security_token(
- global_context->security_token());
+ native_context->security_token());
Handle<String> debug_string =
factory->LookupAsciiSymbol(FLAG_expose_debug_as);
@@ -1984,7 +2030,7 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
reinterpret_cast<void*>(static_cast<intptr_t>(state));
}
-bool Genesis::InstallExtensions(Handle<Context> global_context,
+bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
// TODO(isolates): Extensions on multiple isolates may take a little more
// effort. (The external API reads 'ignore'-- does that mean
@@ -2004,6 +2050,9 @@ bool Genesis::InstallExtensions(Handle<Context> global_context,
if (FLAG_expose_externalize_string) {
InstallExtension("v8/externalize", &extension_states);
}
+ if (FLAG_track_gc_object_stats) {
+ InstallExtension("v8/statistics", &extension_states);
+ }
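
The statistics extension rides the existing flag-gated auto-install path; for comparison, the upstream gc extension is wired the same way (shown here as a sketch, since that code sits outside this hunk):

    if (FLAG_expose_gc) {
      InstallExtension("v8/gc", &extension_states);
    }
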
if (extensions == NULL) return true;
// Install required extensions
@@ -2094,14 +2143,10 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
builtins->set_javascript_builtin(id, *function);
- Handle<SharedFunctionInfo> shared
- = Handle<SharedFunctionInfo>(function->shared());
- if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
return false;
}
- // Set the code object on the function object.
- function->ReplaceCode(function->shared()->code());
- builtins->set_javascript_builtin_code(id, shared->code());
+ builtins->set_javascript_builtin_code(id, function->shared()->code());
}
return true;
}
@@ -2110,8 +2155,9 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
bool Genesis::ConfigureGlobalObjects(
v8::Handle<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
- JSObject::cast(global_context()->global_proxy()));
- Handle<JSObject> inner_global(JSObject::cast(global_context()->global()));
+ JSObject::cast(native_context()->global_proxy()));
+ Handle<JSObject> inner_global(
+ JSObject::cast(native_context()->global_object()));
if (!global_proxy_template.IsEmpty()) {
// Configure the global proxy object.
@@ -2185,27 +2231,24 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
LookupResult result(isolate());
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
- if (result.IsProperty()) continue;
+ if (result.IsFound()) continue;
HandleScope inner;
ASSERT(!to->HasFastProperties());
// Add to dictionary.
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i));
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ details.descriptor_index());
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
- case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- // Ignore non-properties.
- break;
case NORMAL:
// Do not occur since the from object has fast properties.
case HANDLER:
case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
// No element in the instance descriptors has proxy or interceptor type.
UNREACHABLE();
break;
@@ -2222,7 +2265,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// If the property is already there we skip it.
LookupResult result(isolate());
to->LocalLookup(String::cast(raw_key), &result);
- if (result.IsProperty()) continue;
+ if (result.IsFound()) continue;
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
@@ -2261,7 +2304,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
- Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
+ Handle<Map> new_to_map = factory->CopyMap(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
@@ -2275,9 +2318,9 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
ASSERT(!strict_mode_function_instance_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- global_context()->set_function_map(
+ native_context()->set_function_map(
*function_instance_map_writable_prototype_);
- global_context()->set_strict_mode_function_map(
+ native_context()->set_strict_mode_function_map(
*strict_mode_function_instance_map_writable_prototype_);
}
@@ -2303,10 +2346,10 @@ Genesis::Genesis(Isolate* isolate,
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
if (!new_context.is_null()) {
- global_context_ =
+ native_context_ =
Handle<Context>::cast(isolate->global_handles()->Create(*new_context));
- AddToWeakGlobalContextList(*global_context_);
- isolate->set_context(*global_context_);
+ AddToWeakNativeContextList(*native_context_);
+ isolate->set_context(*native_context_);
isolate->counters()->contexts_created_by_snapshot()->Increment();
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =
@@ -2342,7 +2385,7 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
- result_ = global_context_;
+ result_ = native_context_;
}
diff --git a/src/3rdparty/v8/src/bootstrapper.h b/src/3rdparty/v8/src/bootstrapper.h
index 101c2e1..179e65c 100644
--- a/src/3rdparty/v8/src/bootstrapper.h
+++ b/src/3rdparty/v8/src/bootstrapper.h
@@ -104,7 +104,7 @@ class Bootstrapper {
void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+ void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
// Traverses the pointers for memory management.
void Iterate(ObjectVisitor* v);
@@ -126,7 +126,7 @@ class Bootstrapper {
char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
- bool InstallExtensions(Handle<Context> global_context,
+ bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
diff --git a/src/3rdparty/v8/src/builtins.cc b/src/3rdparty/v8/src/builtins.cc
index 84a0c3d..620e4b3 100644
--- a/src/3rdparty/v8/src/builtins.cc
+++ b/src/3rdparty/v8/src/builtins.cc
@@ -35,6 +35,7 @@
#include "ic-inl.h"
#include "heap-profiler.h"
#include "mark-compact.h"
+#include "stub-cache.h"
#include "vm-state-inl.h"
namespace v8 {
@@ -199,10 +200,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_length(Smi::FromInt(0));
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
- Context* global_context = isolate->context()->global_context();
- if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
- !global_context->object_js_array_map()->IsUndefined()) {
- array->set_map(Map::cast(global_context->object_js_array_map()));
+ Context* native_context = isolate->context()->native_context();
+ if (array->GetElementsKind() == GetInitialFastElementsKind() &&
+ !native_context->js_array_maps()->IsUndefined()) {
+ FixedArray* map_array =
+ FixedArray::cast(native_context->js_array_maps());
+ array->set_map(Map::cast(map_array->
+ get(TERMINAL_FAST_ELEMENTS_KIND)));
}
}
} else {
@@ -222,6 +226,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
}
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (!IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ MaybeObject* maybe_array =
+ array->TransitionElementsKind(elements_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
// We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj));
@@ -250,7 +261,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
@@ -261,13 +272,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content
switch (array->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS: {
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
}
break;
}
+ case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -277,6 +290,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
}
break;
}
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
@@ -299,7 +313,7 @@ BUILTIN(InternalArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
- isolate->context()->global_context()->internal_array_function());
+ isolate->context()->native_context()->internal_array_function());
}
@@ -307,7 +321,7 @@ BUILTIN(ArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
}
@@ -389,7 +403,7 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
static bool ArrayPrototypeHasNoElements(Heap* heap,
- Context* global_context,
+ Context* native_context,
JSObject* array_proto) {
// This method depends on the non-writability of Object and Array prototype
// fields.
@@ -398,7 +412,7 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
Object* proto = array_proto->GetPrototype();
if (proto == heap->null_value()) return false;
array_proto = JSObject::cast(proto);
- if (array_proto != global_context->initial_object_prototype()) return false;
+ if (array_proto != native_context->initial_object_prototype()) return false;
if (array_proto->elements() != heap->empty_fixed_array()) return false;
return array_proto->GetPrototype()->IsNull();
}
@@ -412,7 +426,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
- if (args == NULL || array->HasFastElements()) return elms;
+ if (args == NULL || array->HasFastObjectElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
@@ -422,7 +436,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastElements() ||
+ if (args == NULL || array->HasFastObjectElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
@@ -448,11 +462,11 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
- Context* global_context = heap->isolate()->context()->global_context();
+ Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
+ JSObject::cast(native_context->array_function()->prototype());
return receiver->GetPrototype() == array_proto &&
- ArrayPrototypeHasNoElements(heap, global_context, array_proto);
+ ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}
@@ -463,7 +477,7 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
+ GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
name);
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
@@ -496,6 +510,10 @@ BUILTIN(ArrayPush) {
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
+ if (FLAG_harmony_observation && array->map()->is_observed()) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
+ }
+
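
This guard, repeated below in ArrayPop, ArrayShift, ArrayUnshift, and ArraySplice, is the entire integration point with Object.observe: the C++ fast paths emit no change records, so observed arrays are rerouted to the JS builtins, which presumably do. The recurring shape:

    // Same guard in each builtin; only the JS fallback name changes.
    if (FLAG_harmony_observation && array->map()->is_observed()) {
      return CallJsBuiltin(isolate, "ArrayPop", args);  // or Shift, etc.
    }
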
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
if (to_add == 0) {
@@ -516,8 +534,8 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
- new_elms, FAST_ELEMENTS, 0, len);
+ ElementsKind kind = array->GetElementsKind();
+ CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@@ -552,11 +570,15 @@ BUILTIN(ArrayPop) {
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
+ if (FLAG_harmony_observation && array->map()->is_observed()) {
+ return CallJsBuiltin(isolate, "ArrayPop", args);
+ }
+
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get top element
- MaybeObject* top = elms->get(len - 1);
+ Object* top = elms->get(len - 1);
// Set the length.
array->set_length(Smi::FromInt(len - 1));
@@ -567,9 +589,7 @@ BUILTIN(ArrayPop) {
return top;
}
- top = array->GetPrototype()->GetElement(len - 1);
-
- return top;
+ return array->GetPrototype()->GetElement(len - 1);
}
@@ -588,7 +608,11 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastSmiOrObjectElements());
+
+ if (FLAG_harmony_observation && array->map()->is_observed()) {
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ }
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@@ -630,7 +654,11 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastSmiOrObjectElements());
+
+ if (FLAG_harmony_observation && array->map()->is_observed()) {
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ }
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -652,8 +680,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
- new_elms, FAST_ELEMENTS, to_add, len);
+ ElementsKind kind = array->GetElementsKind();
+ CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@@ -682,7 +710,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastTypeElements() ||
+ if (!array->HasFastSmiOrObjectElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -693,12 +721,12 @@ BUILTIN(ArraySlice) {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
- isolate->context()->global_context()->arguments_boilerplate()->map();
+ isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastTypeElements();
+ && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -763,9 +791,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
+ CopyObjectToObjectElements(elms, elements_kind, k,
FixedArray::cast(result_array->elements()),
- FAST_ELEMENTS, 0, result_len);
+ elements_kind, 0, result_len);
return result_array;
}
@@ -786,7 +814,11 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastSmiOrObjectElements());
+
+ if (FLAG_harmony_observation && array->map()->is_observed()) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
int len = Smi::cast(array->length())->value();
@@ -837,9 +869,9 @@ BUILTIN(ArraySplice) {
{
// Fill newly created array.
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
+ CopyObjectToObjectElements(elms, elements_kind, actual_start,
FixedArray::cast(result_array->elements()),
- FAST_ELEMENTS, 0, actual_delete_count);
+ elements_kind, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@@ -888,12 +920,13 @@ BUILTIN(ArraySplice) {
{
// Copy the part before actual_start as is.
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
- new_elms, FAST_ELEMENTS, 0, actual_start);
+ ElementsKind kind = array->GetElementsKind();
+ CopyObjectToObjectElements(elms, kind, 0,
+ new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(elms, FAST_ELEMENTS,
+ CopyObjectToObjectElements(elms, kind,
actual_start + actual_delete_count,
- new_elms, FAST_ELEMENTS,
+ new_elms, kind,
actual_start + item_count, to_copy);
}
@@ -929,10 +962,10 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
Heap* heap = isolate->heap();
- Context* global_context = isolate->context()->global_context();
+ Context* native_context = isolate->context()->native_context();
JSObject* array_proto =
- JSObject::cast(global_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
+ JSObject::cast(native_context->array_function()->prototype());
+ if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -940,11 +973,12 @@ BUILTIN(ArrayConcat) {
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
- ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+ ElementsKind elements_kind = GetInitialFastElementsKind();
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
- || JSArray::cast(arg)->GetPrototype() != array_proto) {
+ if (!arg->IsJSArray() ||
+ !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
+ JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -961,8 +995,18 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
- elements_kind = FAST_ELEMENTS;
+ if (!JSArray::cast(arg)->HasFastSmiElements()) {
+ if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
+ }
+
+ if (JSArray::cast(arg)->HasFastHoleyElements()) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
}
}
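
The loop above computes a join over the arguments' element kinds: smi-only-ness is lost as soon as any argument holds non-smis, and packedness as soon as any argument is holey. Restated as a hypothetical standalone helper (MergeKind is not in the source; the predicates are the ones used above):

    static ElementsKind MergeKind(ElementsKind result, JSArray* arg) {
      if (!arg->HasFastSmiElements() && IsFastSmiElementsKind(result)) {
        result = IsFastHoleyElementsKind(result) ? FAST_HOLEY_ELEMENTS
                                                 : FAST_ELEMENTS;
      }
      if (arg->HasFastHoleyElements()) {
        result = GetHoleyElementsKind(result);
      }
      return result;
    }
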
@@ -982,8 +1026,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
- result_elms, FAST_ELEMENTS,
+ CopyObjectToObjectElements(elms, elements_kind, 0,
+ result_elms, elements_kind,
start_pos, len);
start_pos += len;
}
@@ -1123,6 +1167,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
+ result->VerifyApiCallResultType();
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -1199,6 +1244,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
+ result->VerifyApiCallResultType();
}
}
// Check for exceptions and return result.
@@ -1266,6 +1312,11 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
}
+static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+ LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
+}
+
+
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
KeyedLoadIC::GenerateInitialize(masm);
}
@@ -1363,6 +1414,11 @@ static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
}
+static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+ StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
+}
+
+
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
}
@@ -1582,7 +1638,7 @@ void Builtins::SetUp(bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment; some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[4*KB]; } u;
+ union { int force_alignment; byte buffer[8*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
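
The stack buffer is placed in a union with an int purely to force alignment, a standard C/C++ idiom. A small self-contained illustration (buffer size hypothetical):

    // The union forces 'buffer' to int alignment, which matters on
    // platforms that fault on unaligned instruction fetches.
    #include <cstdint>
    #include <cstdio>

    int main() {
      union { int force_alignment; unsigned char buffer[64]; } u;
      uintptr_t addr = reinterpret_cast<uintptr_t>(u.buffer);
      printf("int-aligned: %d\n", addr % sizeof(int) == 0);  // prints 1
      return 0;
    }
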
diff --git a/src/3rdparty/v8/src/builtins.h b/src/3rdparty/v8/src/builtins.h
index 3ea3393..a2f752e 100644
--- a/src/3rdparty/v8/src/builtins.h
+++ b/src/3rdparty/v8/src/builtins.h
@@ -38,6 +38,25 @@ enum BuiltinExtraArguments {
};
+#define CODE_AGE_LIST_WITH_ARG(V, A) \
+ V(Quadragenarian, A) \
+ V(Quinquagenarian, A) \
+ V(Sexagenarian, A) \
+ V(Septuagenarian, A) \
+ V(Octogenarian, A)
+
+#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
+
+#define CODE_AGE_LIST(V) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define DECLARE_CODE_AGE_BUILTIN(C, V) \
+ V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
+ UNINITIALIZED, Code::kNoExtraICState) \
+ V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
+ UNINITIALIZED, Code::kNoExtraICState)
+
+
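
CODE_AGE_LIST is an X-macro: it applies a caller-supplied macro to each code age, so a single list can stamp out the pairs of builtins declared by DECLARE_CODE_AGE_BUILTIN above as well as enums or name tables elsewhere. A compilable illustration of the pattern with a shortened age list:

    #include <cstdio>

    // Shortened stand-in for CODE_AGE_LIST: the list macro applies V to
    // each age name.
    #define AGE_LIST(V) V(Quadragenarian) V(Quinquagenarian) V(Sexagenarian)

    // Expansion one: an enum value per age.
    enum CodeAge {
    #define DEFINE_AGE(name) k##name,
      AGE_LIST(DEFINE_AGE)
    #undef DEFINE_AGE
      kAgeCount
    };

    // Expansion two: a printable name per age, from the same list.
    static const char* kAgeNames[] = {
    #define AGE_NAME(name) #name,
      AGE_LIST(AGE_NAME)
    #undef AGE_NAME
    };

    int main() {
      for (int i = 0; i < kAgeCount; i++) {
        printf("%d: %s\n", i, kAgeNames[i]);
      }
      return 0;
    }
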
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
@@ -66,6 +85,8 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -80,6 +101,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
@@ -119,6 +142,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
+ Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -153,6 +178,8 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -187,8 +214,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState)
-
+ Code::kNoExtraICState) \
+ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -347,6 +374,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
+ static void Generate_InRecompileQueue(MacroAssembler* masm);
+ static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -369,6 +398,14 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
+#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ static void Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm); \
+ static void Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm);
+ CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
+
static void InitBuiltinFunctionTable();
bool initialized_;
diff --git a/src/3rdparty/v8/src/checks.h b/src/3rdparty/v8/src/checks.h
index 608aa14..d0a0c2b 100644
--- a/src/3rdparty/v8/src/checks.h
+++ b/src/3rdparty/v8/src/checks.h
@@ -284,4 +284,12 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
+// "Extra checks" are lightweight checks that are enabled in some release
+// builds.
+#ifdef ENABLE_EXTRA_CHECKS
+#define EXTRA_CHECK(condition) CHECK(condition)
+#else
+#define EXTRA_CHECK(condition) ((void) 0)
+#endif
+
#endif // V8_CHECKS_H_
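
A sketch of how the new macro is meant to be used; the CHECK here is a simplified stand-in for the one defined earlier in checks.h. Note that in builds without ENABLE_EXTRA_CHECKS the condition is not evaluated at all, so it must be free of required side effects:

    #include <cstdio>
    #include <cstdlib>

    #define ENABLE_EXTRA_CHECKS  // comment out to compile the check away

    #define CHECK(condition)                                  \
      do {                                                    \
        if (!(condition)) {                                   \
          fprintf(stderr, "Check failed: %s\n", #condition);  \
          abort();                                            \
        }                                                     \
      } while (0)

    #ifdef ENABLE_EXTRA_CHECKS
    #define EXTRA_CHECK(condition) CHECK(condition)
    #else
    #define EXTRA_CHECK(condition) ((void) 0)
    #endif

    int main() {
      int value = 42;
      EXTRA_CHECK(value == 42);  // enforced only in extra-checks builds
      printf("%d\n", value);
      return 0;
    }
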
diff --git a/src/3rdparty/v8/src/code-stubs.cc b/src/3rdparty/v8/src/code-stubs.cc
index 814e358..7a72059 100644
--- a/src/3rdparty/v8/src/code-stubs.cc
+++ b/src/3rdparty/v8/src/code-stubs.cc
@@ -142,7 +142,9 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
- ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
+ ASSERT(!NeedsImmovableCode() ||
+ heap->lo_space()->Contains(code) ||
+ heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate);
}
@@ -172,7 +174,9 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
return Map::UpdateCodeCache(known_map_,
- factory->compare_ic_symbol(),
+ strict() ?
+ factory->strict_compare_ic_symbol() :
+ factory->compare_ic_symbol(),
new_object);
}
@@ -183,10 +187,16 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
UNINITIALIZED);
+ ASSERT(op_ == Token::EQ || op_ == Token::EQ_STRICT);
Handle<Object> probe(
- known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
+ known_map_->FindInCodeCache(
+ strict() ?
+ *factory->strict_compare_ic_symbol() :
+ *factory->compare_ic_symbol(),
+ flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
+ ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
return true;
}
return false;
@@ -262,10 +272,13 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
@@ -292,7 +305,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS: {
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
@@ -300,6 +315,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
}
break;
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
grow_mode_);
@@ -430,24 +446,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
+ ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
- if (to_ == FAST_ELEMENTS) {
- if (from_ == FAST_SMI_ONLY_ELEMENTS) {
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- } else if (from_ == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastSmiOrObjectElementsKind(to_)) {
+ if (IsFastSmiOrObjectElementsKind(from_)) {
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm);
+ } else if (IsFastDoubleElementsKind(from_)) {
+ ASSERT(!IsFastSmiElementsKind(to_));
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
- FAST_ELEMENTS,
+ to_,
grow_mode_);
- } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ } else if (IsFastSmiElementsKind(from_) &&
+ IsFastDoubleElementsKind(to_)) {
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
+ } else if (IsFastDoubleElementsKind(from_)) {
+ ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm);
} else {
UNREACHABLE();
}
@@ -456,4 +480,26 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
}
+
+FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
+
+
+void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
+ intptr_t stack_pointer) {
+ if (entry_hook_ != NULL)
+ entry_hook_(function, stack_pointer);
+}
+
+
+bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
+ // We don't allow setting a new entry hook over one that's
+ // already active, as the hooks won't stack.
+ if (entry_hook != 0 && entry_hook_ != 0)
+ return false;
+
+ entry_hook_ = entry_hook;
+ return true;
+}
+
+
} } // namespace v8::internal
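
The hook machinery above enforces a simple install/uninstall protocol: a non-NULL hook can only be installed when none is active, and the hook itself must not allocate (the stub never sets up a frame). A standalone sketch of that contract, reusing the trampoline's parameter types; the counting hook and main() are hypothetical:

    #include <cstdint>
    #include <cstdio>

    typedef void (*FunctionEntryHook)(intptr_t function, intptr_t stack_pointer);

    static FunctionEntryHook entry_hook_ = 0;
    static int entry_count = 0;

    static void CountingHook(intptr_t function, intptr_t stack_pointer) {
      (void) function;
      (void) stack_pointer;
      entry_count++;  // must not allocate or re-enter the VM
    }

    // Same contract as ProfileEntryHookStub::SetFunctionEntryHook above.
    static bool SetFunctionEntryHook(FunctionEntryHook entry_hook) {
      if (entry_hook != 0 && entry_hook_ != 0) return false;  // hooks don't stack
      entry_hook_ = entry_hook;
      return true;
    }

    int main() {
      printf("%d\n", SetFunctionEntryHook(CountingHook));  // 1: installed
      printf("%d\n", SetFunctionEntryHook(CountingHook));  // 0: already active
      printf("%d\n", SetFunctionEntryHook(0));             // 1: uninstalled
      return 0;
    }
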
diff --git a/src/3rdparty/v8/src/code-stubs.h b/src/3rdparty/v8/src/code-stubs.h
index e281a36..8288f4d 100644
--- a/src/3rdparty/v8/src/code-stubs.h
+++ b/src/3rdparty/v8/src/code-stubs.h
@@ -73,7 +73,8 @@ namespace internal {
V(DebuggerStatement) \
V(StringDictionaryLookup) \
V(ElementsTransitionAndStore) \
- V(StoreArrayLiteralElement)
+ V(StoreArrayLiteralElement) \
+ V(ProfileEntryHook)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -162,8 +163,7 @@ class CodeStub BASE_EMBEDDED {
bool FindCodeInCache(Code** code_out);
protected:
- static const int kMajorBits = 6;
- static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
+ static bool CanUseFPRegisters();
private:
// Nonvirtual wrapper around the stub-specific Generate function. Call
@@ -222,8 +222,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
- class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
- class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
+ class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
+ class MinorKeyBits: public BitField<uint32_t,
+ kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
friend class BreakPointIterator;
};
@@ -498,7 +499,7 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
- code->set_compare_operation(op_);
+ code->set_compare_operation(op_ - Token::EQ);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -1000,13 +1001,15 @@ class KeyedStoreElementStub : public CodeStub {
KeyedAccessGrowMode grow_mode)
: is_js_array_(is_js_array),
elements_kind_(elements_kind),
- grow_mode_(grow_mode) { }
+ grow_mode_(grow_mode),
+ fp_registers_(CanUseFPRegisters()) { }
Major MajorKey() { return KeyedStoreElement; }
int MinorKey() {
return ElementsKindBits::encode(elements_kind_) |
IsJSArrayBits::encode(is_js_array_) |
- GrowModeBits::encode(grow_mode_);
+ GrowModeBits::encode(grow_mode_) |
+ FPRegisters::encode(fp_registers_);
}
void Generate(MacroAssembler* masm);
@@ -1015,10 +1018,12 @@ class KeyedStoreElementStub : public CodeStub {
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
class IsJSArrayBits: public BitField<bool, 9, 1> {};
+ class FPRegisters: public BitField<bool, 10, 1> {};
bool is_js_array_;
ElementsKind elements_kind_;
KeyedAccessGrowMode grow_mode_;
+ bool fp_registers_;
DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
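
MinorKey() packs the stub's parameters into disjoint bit ranges of one integer via BitField, so each distinct configuration maps to a distinct cache key. A minimal re-implementation of the encode/decode halves (V8's BitField offers more, and the grow-mode field is really an enum; the bit layout below mirrors KeyedStoreElementStub's):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // Elements kind in bits 0-7, grow mode in bit 8, is-JS-array in
    // bit 9, FP-registers flag in bit 10.
    typedef BitField<int, 0, 8>   ElementsKindBits;
    typedef BitField<bool, 8, 1>  GrowModeBits;
    typedef BitField<bool, 9, 1>  IsJSArrayBits;
    typedef BitField<bool, 10, 1> FPRegistersBits;

    int main() {
      uint32_t key = ElementsKindBits::encode(3) |
                     GrowModeBits::encode(true) |
                     IsJSArrayBits::encode(false) |
                     FPRegistersBits::encode(true);
      assert(ElementsKindBits::decode(key) == 3);
      assert(FPRegistersBits::decode(key) == true);
      assert(IsJSArrayBits::decode(key) == false);
      return 0;
    }
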
@@ -1134,17 +1139,55 @@ class ElementsTransitionAndStoreStub : public CodeStub {
class StoreArrayLiteralElementStub : public CodeStub {
public:
- explicit StoreArrayLiteralElementStub() {}
+ StoreArrayLiteralElementStub()
+ : fp_registers_(CanUseFPRegisters()) { }
private:
+ class FPRegisters: public BitField<bool, 0, 1> {};
+
Major MajorKey() { return StoreArrayLiteralElement; }
- int MinorKey() { return 0; }
+ int MinorKey() { return FPRegisters::encode(fp_registers_); }
void Generate(MacroAssembler* masm);
+ bool fp_registers_;
+
DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
};
+
+class ProfileEntryHookStub : public CodeStub {
+ public:
+ explicit ProfileEntryHookStub() {}
+
+ // The profile entry hook function is not allowed to cause a GC.
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ // Generates a call to the entry hook if it's enabled.
+ static void MaybeCallEntryHook(MacroAssembler* masm);
+
+ // Sets or unsets the entry hook function. Returns true on success,
+ // false on an attempt to replace a non-NULL entry hook with another
+ // non-NULL hook.
+ static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
+
+ static bool HasEntryHook() { return entry_hook_ != NULL; }
+
+ private:
+ static void EntryHookTrampoline(intptr_t function,
+ intptr_t stack_pointer);
+
+ Major MajorKey() { return ProfileEntryHook; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ // The current function entry hook.
+ static FunctionEntryHook entry_hook_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/src/3rdparty/v8/src/codegen.h b/src/3rdparty/v8/src/codegen.h
index 50d70f2..08a777f 100644
--- a/src/3rdparty/v8/src/codegen.h
+++ b/src/3rdparty/v8/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
- static void GenerateSmiOnlyToObject(MacroAssembler* masm);
- static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+ static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
+ static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:
diff --git a/src/3rdparty/v8/src/collection.js b/src/3rdparty/v8/src/collection.js
index 75fe3d5..b3c2db7 100644
--- a/src/3rdparty/v8/src/collection.js
+++ b/src/3rdparty/v8/src/collection.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -79,7 +79,31 @@ function SetDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- return %SetDelete(this, key);
+ if (%SetHas(this, key)) {
+ %SetDelete(this, key);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+function SetGetSize() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.size', this]);
+ }
+ return %SetGetSize(this);
+}
+
+
+function SetClear() {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %SetInitialize(this);
}
@@ -124,7 +148,7 @@ function MapHas(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- return !IS_UNDEFINED(%MapGet(this, key));
+ return %MapHas(this, key);
}
@@ -136,12 +160,26 @@ function MapDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
- if (!IS_UNDEFINED(%MapGet(this, key))) {
- %MapSet(this, key, void 0);
- return true;
- } else {
- return false;
+ return %MapDelete(this, key);
+}
+
+
+function MapGetSize() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.size', this]);
}
+ return %MapGetSize(this);
+}
+
+
+function MapClear() {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %MapInitialize(this);
}
@@ -186,7 +224,7 @@ function WeakMapHas(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- return !IS_UNDEFINED(%WeakMapGet(this, key));
+ return %WeakMapHas(this, key);
}
@@ -198,12 +236,7 @@ function WeakMapDelete(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
- if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
- %WeakMapSet(this, key, void 0);
- return true;
- } else {
- return false;
- }
+ return %WeakMapDelete(this, key);
}
// -------------------------------------------------------------------
@@ -220,18 +253,22 @@ function WeakMapDelete(key) {
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
+ InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
- "delete", SetDelete
+ "delete", SetDelete,
+ "clear", SetClear
));
// Set up the non-enumerable functions on the Map prototype object.
+ InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
- "delete", MapDelete
+ "delete", MapDelete,
+ "clear", MapClear
));
// Set up the WeakMap constructor function.
diff --git a/src/3rdparty/v8/src/compilation-cache.cc b/src/3rdparty/v8/src/compilation-cache.cc
index 82cc223..904e84f 100644
--- a/src/3rdparty/v8/src/compilation-cache.cc
+++ b/src/3rdparty/v8/src/compilation-cache.cc
@@ -98,7 +98,7 @@ void CompilationSubCache::Age() {
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+ Object* undefined = isolate()->heap()->undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
@@ -165,10 +165,12 @@ bool CompilationCacheScript::HasOrigin(
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
+ Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context) {
Object* result = NULL;
int generation;
@@ -177,7 +179,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
- Handle<Object> probe(table->Lookup(*source), isolate());
+ Handle<Object> probe(table->Lookup(*source, *context), isolate());
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@@ -214,7 +216,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
ASSERT(HasOrigin(shared, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
- if (generation != 0) Put(source, shared);
+ if (generation != 0) Put(source, context, shared);
isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
@@ -226,25 +228,28 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
MaybeObject* CompilationCacheScript::TryTablePut(
Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
Handle<CompilationCacheTable> table = GetFirstTable();
- return table->Put(*source, *function_info);
+ return table->Put(*source, *context, *function_info);
}
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
CALL_HEAP_FUNCTION(isolate(),
- TryTablePut(source, function_info),
+ TryTablePut(source, context, function_info),
CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
- SetFirstTable(TablePut(source, function_info));
+ SetFirstTable(TablePut(source, context, function_info));
}
@@ -380,15 +385,17 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
-Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+Handle<SharedFunctionInfo> CompilationCache::LookupScript(
+ Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
- return script_.Lookup(source, name, line_offset, column_offset);
+ return script_.Lookup(source, name, line_offset, column_offset, context);
}
@@ -426,12 +433,13 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) {
return;
}
- script_.Put(source, function_info);
+ script_.Put(source, context, function_info);
}
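
The net effect of threading Handle<Context> through Lookup/Put is that the script cache key becomes the (source, context) pair, so identical source compiled against different contexts gets separate entries. A toy model, with illustrative types standing in for the V8 handles:

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>

    typedef int ContextId;           // stand-in for Handle<Context>
    typedef std::string SharedInfo;  // stand-in for Handle<SharedFunctionInfo>

    typedef std::map<std::pair<std::string, ContextId>, SharedInfo> ScriptCache;
    static ScriptCache script_cache;

    static void PutScript(const std::string& source, ContextId context,
                          const SharedInfo& info) {
      script_cache[std::make_pair(source, context)] = info;
    }

    static bool LookupScript(const std::string& source, ContextId context,
                             SharedInfo* out) {
      ScriptCache::iterator it =
          script_cache.find(std::make_pair(source, context));
      if (it == script_cache.end()) return false;
      *out = it->second;
      return true;
    }

    int main() {
      PutScript("x + 1", 1, "sfi-for-context-1");
      SharedInfo info;
      assert(LookupScript("x + 1", 1, &info));   // hit: same source and context
      assert(!LookupScript("x + 1", 2, &info));  // miss: different context
      return 0;
    }
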
diff --git a/src/3rdparty/v8/src/compilation-cache.h b/src/3rdparty/v8/src/compilation-cache.h
index 2f2fbad..7a236e8 100644
--- a/src/3rdparty/v8/src/compilation-cache.h
+++ b/src/3rdparty/v8/src/compilation-cache.h
@@ -98,16 +98,23 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
- int column_offset);
- void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ int column_offset,
+ Handle<Context> context);
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
- Handle<String> source, Handle<SharedFunctionInfo> function_info);
+ Handle<String> source,
+ Handle<Context> context,
+ Handle<SharedFunctionInfo> function_info);
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
@@ -122,7 +129,7 @@ class CompilationCacheScript : public CompilationSubCache {
// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
-// in global contexts and one for eval calls in other contexts. The cache
+// in native contexts and one for eval calls in other contexts. The cache
// considers the following pieces of information when checking for matching
// entries:
// 1. The source string.
@@ -204,7 +211,8 @@ class CompilationCache {
Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
- int column_offset);
+ int column_offset,
+ Handle<Context> context);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
@@ -223,6 +231,7 @@ class CompilationCache {
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source,
+ Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
diff --git a/src/3rdparty/v8/src/compiler-intrinsics.h b/src/3rdparty/v8/src/compiler-intrinsics.h
index b73e8ac..c1693b0 100644
--- a/src/3rdparty/v8/src/compiler-intrinsics.h
+++ b/src/3rdparty/v8/src/compiler-intrinsics.h
@@ -28,6 +28,10 @@
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_
+#if defined(_WIN32_WCE)
+#include <cmnintrin.h>
+#endif
+
namespace v8 {
namespace internal {
@@ -58,7 +62,7 @@ int CompilerIntrinsics::CountSetBits(uint32_t value) {
return __builtin_popcount(value);
}
-#elif defined(_MSC_VER)
+#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
@@ -75,6 +79,21 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return 31 - static_cast<int>(result);
}
+#elif defined(_WIN32_WCE)
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+ // Taken from http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightFloatCast
+ float f = (float)(value & -value); // cast the least significant set bit of value to a float
+ return (*(uint32_t *)&f >> 23) - 0x7f;
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+ return _CountLeadingZeros(value);
+}
+#else
+#error Unsupported compiler
+#endif
+
+#if defined(_MSC_VER)
int CompilerIntrinsics::CountSetBits(uint32_t value) {
// Manually count set bits.
value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
@@ -84,9 +103,6 @@ int CompilerIntrinsics::CountSetBits(uint32_t value) {
value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
return value;
}
-
-#else
-#error Unsupported compiler
#endif
} } // namespace v8::internal
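
The Windows CE fallback relies on an IEEE-754 property: value & -value isolates the lowest set bit, 2^n, and converting 2^n to float stores n + 127 in the biased exponent (bits 23-30), so (bits >> 23) - 0x7f recovers n. For example, 40 = 0b101000 gives 40 & -40 = 8 = 2^3; 8.0f carries biased exponent 130, and 130 - 127 = 3. A checked version of the same trick, with memcpy replacing the pointer cast to stay clear of strict-aliasing rules:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static int CountTrailingZeros(uint32_t value) {
      // Like the original, the result is meaningless for value == 0.
      float f = (float)(value & -value);  // isolate the lowest set bit, 2^n
      uint32_t bits;
      memcpy(&bits, &f, sizeof(bits));    // reinterpret the float's bit pattern
      return (bits >> 23) - 0x7f;         // biased exponent minus 127
    }

    int main() {
      assert(CountTrailingZeros(40) == 3);            // 40 = 0b101000
      assert(CountTrailingZeros(1) == 0);
      assert(CountTrailingZeros(0x80000000u) == 31);
      return 0;
    }
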
diff --git a/src/3rdparty/v8/src/compiler.cc b/src/3rdparty/v8/src/compiler.cc
index 4060b15..b1f40b6 100644
--- a/src/3rdparty/v8/src/compiler.cc
+++ b/src/3rdparty/v8/src/compiler.cc
@@ -51,7 +51,7 @@ namespace v8 {
namespace internal {
-CompilationInfo::CompilationInfo(Handle<Script> script)
+CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: isolate_(script->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE)),
function_(NULL),
@@ -60,12 +60,15 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
script_(script),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+ Zone* zone)
: isolate_(shared_info->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@@ -76,12 +79,14 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
: isolate_(closure->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@@ -93,11 +98,19 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
- osr_ast_id_(AstNode::kNoNumber) {
+ context_(closure->context()),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
Initialize(BASE);
}
+CompilationInfo::~CompilationInfo() {
+ delete deferred_handles_;
+}
+
+
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@@ -118,7 +131,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
- function()->scope()->AllowsLazyRecompilation() &&
+ function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
@@ -137,9 +150,8 @@ void CompilationInfo::AbortOptimization() {
// all. However, crankshaft supports recompilation of functions, so in this case
// the full compiler need not be used if a debugger is attached, but only if
// break points have actually been set.
-static bool is_debugging_active() {
+static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- Isolate* isolate = Isolate::Current();
return V8::UseCrankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
@@ -149,27 +161,32 @@ static bool is_debugging_active() {
}
-static bool AlwaysFullCompiler() {
- return FLAG_always_full_compiler || is_debugging_active();
+static bool AlwaysFullCompiler(Isolate* isolate) {
+ return FLAG_always_full_compiler || IsDebuggerActive(isolate);
}
-static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
+void OptimizingCompiler::RecordOptimizationStats() {
+ Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms_creategraph =
+ static_cast<double>(time_taken_to_create_graph_) / 1000;
+ double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
+ double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
if (FLAG_trace_opt) {
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
- PrintF(" - took %0.3f ms]\n", ms);
+ PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+ ms_codegen);
}
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
static int compiled_functions = 0;
static int code_size = 0;
- compilation_time += ms;
+ compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
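
The three time_taken_to_* fields are filled in by the Timer objects constructed at the top of each phase (see "Timer t(this, &time_taken_to_create_graph_)" further below). A minimal sketch of that RAII pattern, with a hypothetical microsecond clock standing in for OS::Ticks() and the accumulator passed directly rather than through the compiler object:

    #include <cstdint>
    #include <cstdio>
    #include <ctime>

    static int64_t Ticks() {  // microseconds; stand-in for OS::Ticks()
      return static_cast<int64_t>(clock()) * 1000000 / CLOCKS_PER_SEC;
    }

    class Timer {
     public:
      explicit Timer(int64_t* accumulator)
          : accumulator_(accumulator), start_(Ticks()) {}
      ~Timer() { *accumulator_ += Ticks() - start_; }
     private:
      int64_t* accumulator_;
      int64_t start_;
    };

    static int64_t time_taken_to_create_graph_ = 0;

    int main() {
      {
        Timer t(&time_taken_to_create_graph_);  // times this scope
        volatile double sink = 0;
        for (int i = 0; i < 1000000; i++) sink += i;
      }
      printf("graph creation: %0.3f ms\n",
             time_taken_to_create_graph_ / 1000.0);  // same /1000 as above
      return 0;
    }
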
@@ -180,46 +197,54 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
}
+// A return value of true indicates the compilation pipeline is still
+// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- // Test if we can optimize this function when asked to. We can only
- // do this after the scopes are computed.
- if (!V8::UseCrankshaft()) {
- info->DisableOptimization();
- }
+ OptimizingCompiler compiler(info);
+ OptimizingCompiler::Status status = compiler.CreateGraph();
- // In case we are not optimizing simply return the code from
- // the full code generator.
- if (!info->IsOptimizing()) {
- return FullCodeGenerator::MakeCode(info);
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ return status != OptimizingCompiler::FAILED;
+ }
+ status = compiler.OptimizeGraph();
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ status = compiler.AbortOptimization();
+ return status != OptimizingCompiler::FAILED;
}
+ status = compiler.GenerateAndInstallCode();
+ return status != OptimizingCompiler::FAILED;
+}
- // We should never arrive here if there is not code object on the
+
+OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+ ASSERT(V8::UseCrankshaft());
+ ASSERT(info()->IsOptimizing());
+ ASSERT(!info()->IsCompilingForDebugging());
+
+ // We should never arrive here if there is no code object on the
// shared function object.
- Handle<Code> code(info->shared_info()->code());
+ Handle<Code> code(info()->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
- ASSERT(!info->shared_info()->optimization_disabled());
+ ASSERT(!info()->shared_info()->optimization_disabled());
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
- if (AlwaysFullCompiler()) {
- info->SetCode(code);
- return true;
+ if (AlwaysFullCompiler(info()->isolate())) {
+ info()->SetCode(code);
+ return SetLastStatus(BAILED_OUT);
}
// Limit the number of times we re-compile a function with
// the optimizing compiler.
const int kMaxOptCount =
- FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
- if (info->shared_info()->opt_count() > kMaxOptCount) {
- info->AbortOptimization();
- info->shared_info()->DisableOptimization();
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
+ FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
+ if (info()->shared_info()->opt_count() > kMaxOptCount) {
+ info()->set_bailout_reason("optimized too many times");
+ return AbortOptimization();
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@@ -230,27 +255,28 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
const int parameter_limit = -LUnallocated::kMinFixedIndex;
+ Scope* scope = info()->scope();
+ if ((scope->num_parameters() + 1) > parameter_limit) {
+ info()->set_bailout_reason("too many parameters");
+ return AbortOptimization();
+ }
+
const int locals_limit = LUnallocated::kMaxFixedIndex;
- Scope* scope = info->scope();
- if ((scope->num_parameters() + 1) > parameter_limit ||
- (info->osr_ast_id() != AstNode::kNoNumber &&
- scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
- info->AbortOptimization();
- info->shared_info()->DisableOptimization();
- // True indicates the compilation pipeline is still going, not
- // necessarily that we optimized the code.
- return true;
+ if (!info()->osr_ast_id().IsNone() &&
+ scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
+ info()->set_bailout_reason("too many parameters/locals");
+ return AbortOptimization();
}
// Take --hydrogen-filter into account.
- Handle<String> name = info->function()->debug_name();
+ Handle<String> name = info()->function()->debug_name();
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
&& name->IsEqualTo(filter.SubVector(1, filter.length())))
|| (filter[0] != '-' && !name->IsEqualTo(filter))) {
- info->SetCode(code);
- return true;
+ info()->SetCode(code);
+ return SetLastStatus(BAILED_OUT);
}
}
@@ -258,20 +284,21 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
- int64_t start = OS::Ticks();
- bool should_recompile = !info->shared_info()->has_deoptimization_support();
+ Timer t(this, &time_taken_to_create_graph_);
+ bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
- CompilationInfo unoptimized(info->shared_info());
+ CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
- unoptimized.SetFunction(info->function());
- unoptimized.SetScope(info->scope());
+ unoptimized.SetFunction(info()->function());
+ unoptimized.SetScope(info()->scope());
+ unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
if (should_recompile) {
- if (!succeeded) return false;
- Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!succeeded) return SetLastStatus(FAILED);
+ Handle<SharedFunctionInfo> shared = info()->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
Compiler::RecordFunctionCompilation(
@@ -285,50 +312,93 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || code->optimizable());
- ASSERT(info->shared_info()->has_deoptimization_support());
+ ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info->function());
+ HTracer::Instance()->TraceCompilation(info()->function());
}
-
- Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context, info->isolate());
- HGraphBuilder builder(info, &oracle);
+ Handle<Context> native_context(
+ info()->closure()->context()->native_context());
+ oracle_ = new(info()->zone()) TypeFeedbackOracle(
+ code, native_context, info()->isolate(), info()->zone());
+ graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
HPhase phase(HPhase::kTotal);
- HGraph* graph = builder.CreateGraph();
- if (info->isolate()->has_pending_exception()) {
- info->SetCode(Handle<Code>::null());
- return false;
+ graph_ = graph_builder_->CreateGraph();
+
+ if (info()->isolate()->has_pending_exception()) {
+ info()->SetCode(Handle<Code>::null());
+ return SetLastStatus(FAILED);
}
- if (graph != NULL) {
- Handle<Code> optimized_code = graph->Compile(info);
- if (!optimized_code.is_null()) {
- info->SetCode(optimized_code);
- FinishOptimization(info->closure(), start);
- return true;
+ // The function being compiled may have bailed out due to an inline
+ // candidate bailing out. In such a case, we don't disable
+ // optimization on the shared_info.
+ ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
+ if (graph_ == NULL) {
+ if (graph_builder_->inline_bailout()) {
+ info_->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
+ } else {
+ return AbortOptimization();
+ }
+ }
+
+ return SetLastStatus(SUCCEEDED);
+}
+
+OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+ AssertNoAllocation no_gc;
+ NoHandleAllocation no_handles;
+
+ ASSERT(last_status() == SUCCEEDED);
+ Timer t(this, &time_taken_to_optimize_);
+ ASSERT(graph_ != NULL);
+ SmartArrayPointer<char> bailout_reason;
+ if (!graph_->Optimize(&bailout_reason)) {
+ if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
+ return SetLastStatus(BAILED_OUT);
+ } else {
+ chunk_ = LChunk::NewChunk(graph_);
+ if (chunk_ == NULL) {
+ return SetLastStatus(BAILED_OUT);
}
}
+ return SetLastStatus(SUCCEEDED);
+}
+
- // Keep using the shared code.
- info->AbortOptimization();
- if (!builder.inline_bailout()) {
- // Mark the shared code as unoptimizable unless it was an inlined
- // function that bailed out.
- info->shared_info()->DisableOptimization();
+OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+ ASSERT(last_status() == SUCCEEDED);
+ Timer timer(this, &time_taken_to_codegen_);
+ ASSERT(chunk_ != NULL);
+ ASSERT(graph_ != NULL);
+ Handle<Code> optimized_code = chunk_->Codegen();
+ if (optimized_code.is_null()) {
+ info()->set_bailout_reason("code generation failed");
+ return AbortOptimization();
}
- // True indicates the compilation pipeline is still going, not necessarily
- // that we optimized the code.
- return true;
+ info()->SetCode(optimized_code);
+ RecordOptimizationStats();
+ return SetLastStatus(SUCCEEDED);
}
static bool GenerateCode(CompilationInfo* info) {
- return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
- FullCodeGenerator::MakeCode(info) :
- MakeCrankshaftCode(info);
+ bool is_optimizing = V8::UseCrankshaft() &&
+ !info->IsCompilingForDebugging() &&
+ info->IsOptimizing();
+ if (is_optimizing) {
+ return MakeCrankshaftCode(info);
+ } else {
+ if (info->IsOptimizing()) {
+ // Have the CompilationInfo decide if the compilation should be
+ // BASE or NONOPT.
+ info->DisableOptimization();
+ }
+ return FullCodeGenerator::MakeCode(info);
+ }
}
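
CreateGraph, OptimizeGraph and GenerateAndInstallCode each report SUCCEEDED, BAILED_OUT or FAILED, and the drivers above (MakeCrankshaftCode, GenerateCode) treat FAILED as a hard error while BAILED_OUT merely falls back to unoptimized code. A compilable stand-in for that protocol, with illustrative phase bodies:

    #include <cstdio>

    enum Status { FAILED, BAILED_OUT, SUCCEEDED };

    // Illustrative stand-in for OptimizingCompiler: the second phase
    // bails out, as it would for an unsupported language construct.
    struct PipelinePhases {
      Status CreateGraph() { return SUCCEEDED; }
      Status OptimizeGraph() { return BAILED_OUT; }
      Status GenerateAndInstallCode() { return SUCCEEDED; }
      Status AbortOptimization() { return BAILED_OUT; }
    };

    // Mirrors MakeCrankshaftCode: true means the wider compilation
    // pipeline keeps going, not that optimized code was produced.
    static bool MakePipelineCode(PipelinePhases* compiler) {
      Status status = compiler->CreateGraph();
      if (status != SUCCEEDED) return status != FAILED;
      status = compiler->OptimizeGraph();
      if (status != SUCCEEDED) {
        return compiler->AbortOptimization() != FAILED;
      }
      return compiler->GenerateAndInstallCode() != FAILED;
    }

    int main() {
      PipelinePhases compiler;
      printf("pipeline still going: %d\n", MakePipelineCode(&compiler));  // 1
      return 0;
    }
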
@@ -346,7 +416,8 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
// the compilation info is set if compilation succeeded.
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+ Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
+ info->zone());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@@ -356,12 +427,12 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
- ASSERT(!isolate->global_context().is_null());
+ ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*isolate->global_context())->data());
+ script->set_context_data((*isolate->native_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@@ -420,7 +491,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->name(),
lit->materialized_literal_count(),
info->code(),
- ScopeInfo::Create(info->scope()));
+ ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@@ -462,7 +533,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
- live_edit_tracker.RecordFunctionInfo(result, lit);
+ live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
return result;
}
@@ -472,6 +543,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
int column_offset,
+ Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@@ -493,7 +565,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
result = compilation_cache->LookupScript(source,
script_name,
line_offset,
- column_offset);
+ column_offset,
+ context);
}
if (result.is_null()) {
@@ -521,22 +594,24 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
: *script_data);
// Compile the function and add it to the cache.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
+ info.SetContext(context);
if (FLAG_use_strict) {
info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
}
if (compile_flags & v8::Script::QmlMode) info.MarkAsQmlMode();
result = MakeFunctionInfo(&info);
- if (extension == NULL && !result.is_null()) {
- compilation_cache->PutScript(source, result);
+ if (extension == NULL && !result.is_null() && !result->dont_cache()) {
+ compilation_cache->PutScript(source, context, result);
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
+ result->code()->MakeYoung();
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -571,17 +646,17 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
if (qml_mode) info.MarkAsQmlMode();
- info.SetCallingContext(context);
+ info.SetContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
- result->DisableOptimization();
+ result->DisableOptimization("eval");
// If caller is strict mode, the result must be in strict mode or
// extended mode as well, but not the other way around. Consider:
@@ -591,23 +666,133 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
// extended mode.
ASSERT(language_mode != EXTENDED_MODE ||
result->is_extended_mode());
- compilation_cache->PutEval(
- source, context, is_global, result, scope_position);
+ if (!result->dont_cache()) {
+ compilation_cache->PutEval(
+ source, context, is_global, result, scope_position);
+ }
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
+ result->code()->MakeYoung();
}
return result;
}
+static bool InstallFullCode(CompilationInfo* info) {
+ // Update the shared function info with the compiled code and the
+ // scope info. Please note, that the order of the shared function
+ // info initialization is important since set_scope_info might
+ // trigger a GC, causing the ASSERT below to be invalid if the code
+ // was flushed. By setting the code object last we avoid this.
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<Code> code = info->code();
+ Handle<JSFunction> function = info->closure();
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::Create(info->scope(), info->zone());
+ shared->set_scope_info(*scope_info);
+ shared->set_code(*code);
+ if (!function.is_null()) {
+ function->ReplaceCode(*code);
+ ASSERT(!function->IsOptimized());
+ }
+
+ // Set the expected number of properties for instances.
+ FunctionLiteral* lit = info->function();
+ int expected = lit->expected_property_count();
+ SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+ // Set the optimization hints after performing lazy compilation, as
+ // these are not set when the function is set up as a lazily
+ // compiled function.
+ shared->SetThisPropertyAssignmentsInfo(
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+
+ // Check the function has compiled code.
+ ASSERT(shared->is_compiled());
+ shared->set_code_age(0);
+ shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+ shared->set_ast_node_count(lit->ast_node_count());
+
+ if (V8::UseCrankshaft() &&
+ !function.is_null() &&
+ !shared->optimization_disabled()) {
+ // If we're asked to always optimize, we compile the optimized
+ // version of the function right away - unless the debugger is
+ // active as it makes no sense to compile optimized code then.
+ if (FLAG_always_opt &&
+ !Isolate::Current()->DebuggerHasBreakPoints()) {
+ CompilationInfoWithZone optimized(function);
+ optimized.SetOptimizing(BailoutId::None());
+ return Compiler::CompileLazy(&optimized);
+ }
+ }
+ return true;
+}
+
+
+static void InstallCodeCommon(CompilationInfo* info) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<Code> code = info->code();
+ ASSERT(!code.is_null());
+
+ // Set optimizable to false if this is disallowed by the shared
+ // function info, e.g., we might have flushed the code and must
+ // reset this bit when lazy compiling the code again.
+ if (shared->optimization_disabled()) code->set_optimizable(false);
+
+ Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+}
+
+
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+ Handle<Code> code = info->code();
+ if (FLAG_cache_optimized_code &&
+ info->osr_ast_id().IsNone() &&
+ code->kind() == Code::OPTIMIZED_FUNCTION) {
+ Handle<JSFunction> function = info->closure();
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<FixedArray> literals(function->literals());
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(
+ shared, native_context, code, literals);
+ }
+}
+
+
+static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
+ if (FLAG_cache_optimized_code &&
+ info->osr_ast_id().IsNone() &&
+ info->IsOptimizing()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ Handle<JSFunction> function = info->closure();
+ ASSERT(!function.is_null());
+ Handle<Context> native_context(function->context()->native_context());
+ int index = shared->SearchOptimizedCodeMap(*native_context);
+ if (index > 0) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
+ }
+ // Caching of optimized code enabled and optimized code found.
+ shared->InstallFromOptimizedCodeMap(*function, index);
+ return true;
+ }
+ }
+ return false;
+}
+
+
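
InsertCodeIntoOptimizedCodeMap and InstallCodeFromOptimizedCodeMap together implement a per-native-context cache of optimized code hanging off the SharedFunctionInfo: a function optimized in one context can be reinstalled cheaply in that same context, but must be recompiled for others. A toy model with illustrative types (the real map is a FixedArray with its own index conventions, as seen above):

    #include <cassert>
    #include <vector>

    typedef int NativeContextId;  // stand-in for a native Context
    typedef int CodeId;           // stand-in for a Code object

    // Toy model of SharedFunctionInfo's optimized code map.
    struct OptimizedCodeMap {
      struct Entry { NativeContextId context; CodeId code; };
      std::vector<Entry> entries;

      void Add(NativeContextId context, CodeId code) {
        Entry entry = { context, code };
        entries.push_back(entry);
      }

      // Returns the entry index, or -1 on a miss (the real
      // SearchOptimizedCodeMap returns FixedArray indices).
      int Search(NativeContextId context) const {
        for (size_t i = 0; i < entries.size(); i++) {
          if (entries[i].context == context) return static_cast<int>(i);
        }
        return -1;
      }
    };

    int main() {
      OptimizedCodeMap map;
      map.Add(7, 1001);             // code optimized in native context 7
      assert(map.Search(7) == 0);   // hit: reinstall without recompiling
      assert(map.Search(9) == -1);  // miss: context 9 compiles its own copy
      return 0;
    }
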
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(isolate, COMPILER);
@@ -618,6 +803,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
+ if (InstallCodeFromOptimizedCodeMap(info)) return true;
+
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info, kNoParsingFlags)) {
// Measure how long it takes to do the lazy compilation; only take the
@@ -642,68 +829,17 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
isolate->StackOverflow();
}
} else {
- ASSERT(!info->code().is_null());
- Handle<Code> code = info->code();
- // Set optimizable to false if this is disallowed by the shared
- // function info, e.g., we might have flushed the code and must
- // reset this bit when lazy compiling the code again.
- if (shared->optimization_disabled()) code->set_optimizable(false);
-
- Handle<JSFunction> function = info->closure();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty());
- function->ReplaceCode(*code);
+ info->closure()->ReplaceCode(*code);
+ InsertCodeIntoOptimizedCodeMap(info);
+ return true;
} else {
- // Update the shared function info with the compiled code and the
- // scope info. Please note, that the order of the shared function
- // info initialization is important since set_scope_info might
- // trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By setting the code object last we avoid this.
- Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
- shared->set_scope_info(*scope_info);
- shared->set_code(*code);
- if (!function.is_null()) {
- function->ReplaceCode(*code);
- ASSERT(!function->IsOptimized());
- }
-
- // Set the expected number of properties for instances.
- FunctionLiteral* lit = info->function();
- int expected = lit->expected_property_count();
- SetExpectedNofPropertiesFromEstimate(shared, expected);
-
- // Set the optimization hints after performing lazy compilation, as
- // these are not set when the function is set up as a lazily
- // compiled function.
- shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-
- // Check the function has compiled code.
- ASSERT(shared->is_compiled());
- shared->set_code_age(0);
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
- shared->set_dont_inline(lit->flags()->Contains(kDontInline));
- shared->set_ast_node_count(lit->ast_node_count());
-
- if (V8::UseCrankshaft()&&
- !function.is_null() &&
- !shared->optimization_disabled()) {
- // If we're asked to always optimize, we compile the optimized
- // version of the function right away - unless the debugger is
- // active as it makes no sense to compile optimized code then.
- if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
- CompilationInfo optimized(function);
- optimized.SetOptimizing(AstNode::kNoNumber);
- return CompileLazy(&optimized);
- }
- }
+ return InstallFullCode(info);
}
-
- return true;
}
}
@@ -712,10 +848,97 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
+void Compiler::RecompileParallel(Handle<JSFunction> closure) {
+ if (closure->IsInRecompileQueue()) return;
+ ASSERT(closure->IsMarkedForParallelRecompilation());
+
+ Isolate* isolate = closure->GetIsolate();
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF("  ** Compilation queue full, will retry optimizing on next run.\n");
+ }
+ return;
+ }
+
+ SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
+ VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+ PostponeInterruptsScope postpone(isolate);
+
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ int compiled_size = shared->end_position() - shared->start_position();
+ isolate->counters()->total_compile_size()->Increment(compiled_size);
+ info->SetOptimizing(BailoutId::None());
+
+ {
+ CompilationHandleScope handle_scope(*info);
+
+ if (InstallCodeFromOptimizedCodeMap(*info)) return;
+
+ if (ParserApi::Parse(*info, kNoParsingFlags)) {
+ LanguageMode language_mode = info->function()->language_mode();
+ info->SetLanguageMode(language_mode);
+ shared->set_language_mode(language_mode);
+ info->SaveHandles();
+
+ if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
+ OptimizingCompiler* compiler =
+ new(info->zone()) OptimizingCompiler(*info);
+ OptimizingCompiler::Status status = compiler->CreateGraph();
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ shared->code()->set_profiler_ticks(0);
+ closure->ReplaceCode(isolate->builtins()->builtin(
+ Builtins::kInRecompileQueue));
+ info.Detach();
+ } else if (status == OptimizingCompiler::BAILED_OUT) {
+ isolate->clear_pending_exception();
+ InstallFullCode(*info);
+ }
+ }
+ }
+ }
+
+ if (isolate->has_pending_exception()) {
+ isolate->clear_pending_exception();
+ }
+}
+
+
+void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+ SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+ // If crankshaft succeeded, install the optimized code; otherwise install
+ // the unoptimized code.
+ OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ if (status != OptimizingCompiler::SUCCEEDED) {
+ optimizing_compiler->info()->set_bailout_reason(
+ "failed/bailed out last time");
+ status = optimizing_compiler->AbortOptimization();
+ } else {
+ status = optimizing_compiler->GenerateAndInstallCode();
+ ASSERT(status == OptimizingCompiler::SUCCEEDED ||
+ status == OptimizingCompiler::BAILED_OUT);
+ }
+
+ InstallCodeCommon(*info);
+ if (status == OptimizingCompiler::SUCCEEDED) {
+ Handle<Code> code = info->code();
+ ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+ info->closure()->ReplaceCode(*code);
+ if (info->shared_info()->SearchOptimizedCodeMap(
+ info->closure()->context()->native_context()) == -1) {
+ InsertCodeIntoOptimizedCodeMap(*info);
+ }
+ } else {
+ info->SetCode(Handle<Code>(info->shared_info()->code()));
+ InstallFullCode(*info);
+ }
+}
+
+
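
RecompileParallel and InstallOptimizedCode split optimization across threads: the main thread parses and builds the graph (both need the heap), queues the job, and swaps in the InRecompileQueue builtin; a compiler thread runs OptimizeGraph, which is why that phase asserts NoHandleAllocation; the main thread later installs the result. A single-threaded sketch of the hand-off, with hypothetical names:

    #include <cstdio>
    #include <queue>

    // Hypothetical stand-in for an OptimizingCompiler whose graph was
    // already created on the main thread.
    struct CompileJob {
      const char* function_name;
    };

    static std::queue<CompileJob*> recompile_queue;

    static void QueueForOptimization(CompileJob* job) {
      // The closure now runs the kInRecompileQueue builtin until the
      // optimized code is installed.
      recompile_queue.push(job);
    }

    static void OptimizingCompilerThreadStep() {
      while (!recompile_queue.empty()) {
        CompileJob* job = recompile_queue.front();
        recompile_queue.pop();
        // OptimizeGraph() would run here, off the main thread and
        // without touching the heap.
        printf("optimized %s in background\n", job->function_name);
        delete job;  // the main thread would InstallOptimizedCode() instead
      }
    }

    int main() {
      CompileJob* job = new CompileJob();
      job->function_name = "hot_function";  // CreateGraph already succeeded
      QueueForOptimization(job);
      OptimizingCompilerThreadStep();
      return 0;
    }
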
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
// Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
@@ -726,19 +949,24 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
+ // If the debugger requests compilation for break points, we cannot be
+ // aggressive about lazy compilation, because it might trigger compilation
+ // of functions without an outer context when setting a breakpoint through
+ // Debug::FindSharedFunctionInfoInScript.
+ bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(info.isolate());
+ !LiveEditFunctionTracker::IsActive(info.isolate()) &&
+ (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
// Generate code
- if (FLAG_lazy && allow_lazy) {
+ if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
Handle<Code> code = info.isolate()->builtins()->LazyCompile();
info.SetCode(code);
- } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
- (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
+ } else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
- scope_info = ScopeInfo::Create(info.scope());
+ scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
return Handle<SharedFunctionInfo>::null();
}
@@ -752,12 +980,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
SetFunctionInfo(result, literal, false, script);
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
+ result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal);
+ live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
}
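
// Editorial sketch (not part of the patch): the lazy-compilation decision in
// BuildFunctionInfo above reduces to the predicate below. Parameter names
// mirror the locals used there; this standalone form is illustrative only.
static bool AllowLazySketch(bool allows_lazy_compilation,
                            bool live_edit_active,
                            bool debugger_has_break_points,
                            bool allows_lazy_without_context) {
  // LiveEdit disables lazy compilation outright. An active debugger keeps it
  // only for functions compilable without an outer context, since setting a
  // break point goes through Debug::FindSharedFunctionInfoInScript.
  return allows_lazy_compilation &&
         !live_edit_active &&
         (!debugger_has_break_points || allows_lazy_without_context);
}
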
@@ -784,6 +1013,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+ function_info->set_allows_lazy_compilation_without_context(
+ lit->AllowsLazyCompilationWithoutContext());
function_info->set_language_mode(lit->language_mode());
function_info->set_qml_mode(lit->qml_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
@@ -792,6 +1023,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_function(lit->is_function());
function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+ function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
}
@@ -804,7 +1036,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
- if (info->isolate()->logger()->is_logging() ||
+ if (info->isolate()->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(info->isolate())) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
diff --git a/src/3rdparty/v8/src/compiler.h b/src/3rdparty/v8/src/compiler.h
index 09583c0..b119775 100644
--- a/src/3rdparty/v8/src/compiler.h
+++ b/src/3rdparty/v8/src/compiler.h
@@ -39,16 +39,21 @@ class ScriptDataImpl;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
-class CompilationInfo BASE_EMBEDDED {
+class CompilationInfo {
public:
- explicit CompilationInfo(Handle<Script> script);
- explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
- explicit CompilationInfo(Handle<JSFunction> closure);
+ CompilationInfo(Handle<Script> script, Zone* zone);
+ CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
+ CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+
+ virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
return isolate_;
}
+ Zone* zone() {
+ return zone_;
+ }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@@ -68,8 +73,8 @@ class CompilationInfo BASE_EMBEDDED {
Handle<Script> script() const { return script_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- Handle<Context> calling_context() const { return calling_context_; }
- int osr_ast_id() const { return osr_ast_id_; }
+ Handle<Context> context() const { return context_; }
+ BailoutId osr_ast_id() const { return osr_ast_id_; }
void MarkAsEval() {
ASSERT(!is_lazy());
@@ -119,13 +124,8 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
pre_parse_data_ = pre_parse_data;
}
- void SetCallingContext(Handle<Context> context) {
- ASSERT(is_eval());
- calling_context_ = context;
- }
- void SetOsrAstId(int osr_ast_id) {
- ASSERT(IsOptimizing());
- osr_ast_id_ = osr_ast_id;
+ void SetContext(Handle<Context> context) {
+ context_ = context;
}
void MarkCompilingForDebugging(Handle<Code> current_code) {
ASSERT(mode_ != OPTIMIZE);
@@ -142,17 +142,18 @@ class CompilationInfo BASE_EMBEDDED {
}
bool has_global_object() const {
- return !closure().is_null() && (closure()->context()->global() != NULL);
+ return !closure().is_null() &&
+ (closure()->context()->global_object() != NULL);
}
GlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global() : NULL;
+ return has_global_object() ? closure()->context()->global_object() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
- void SetOptimizing(int osr_ast_id) {
+ void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
@@ -174,6 +175,21 @@ class CompilationInfo BASE_EMBEDDED {
// current compilation pipeline.
void AbortOptimization();
+ void set_deferred_handles(DeferredHandles* deferred_handles) {
+ ASSERT(deferred_handles_ == NULL);
+ deferred_handles_ = deferred_handles;
+ }
+
+ void SaveHandles() {
+ SaveHandle(&closure_);
+ SaveHandle(&shared_info_);
+ SaveHandle(&context_);
+ SaveHandle(&script_);
+ }
+
+ const char* bailout_reason() const { return bailout_reason_; }
+ void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
+
private:
Isolate* isolate_;
@@ -188,8 +204,6 @@ class CompilationInfo BASE_EMBEDDED {
NONOPT
};
- CompilationInfo() : function_(NULL) {}
-
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
ASSERT(!script_.is_null());
@@ -203,6 +217,7 @@ class CompilationInfo BASE_EMBEDDED {
if (!shared_info_.is_null() && shared_info_->qml_mode()) {
MarkAsQmlMode();
}
+ set_bailout_reason("unknown");
}
void SetMode(Mode mode) {
@@ -254,18 +269,148 @@ class CompilationInfo BASE_EMBEDDED {
v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
- // The context of the caller is needed for eval code, and will be a null
- // handle otherwise.
- Handle<Context> calling_context_;
+ // The context of the caller for eval code, and the global context for a
+ // global script. Will be a null handle otherwise.
+ Handle<Context> context_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
- int osr_ast_id_;
+ BailoutId osr_ast_id_;
+
+ // The zone from which the compilation pipeline working on this
+ // CompilationInfo allocates.
+ Zone* zone_;
+
+ DeferredHandles* deferred_handles_;
+
+ template<typename T>
+ void SaveHandle(Handle<T> *object) {
+ if (!object->is_null()) {
+ Handle<T> handle(*(*object));
+ *object = handle;
+ }
+ }
+
+ const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
+// Exactly like a CompilationInfo, except also creates and enters a
+// Zone on construction and deallocates it on exit.
+class CompilationInfoWithZone: public CompilationInfo {
+ public:
+ explicit CompilationInfoWithZone(Handle<Script> script)
+ : CompilationInfo(script, &zone_),
+ zone_(script->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
+ : CompilationInfo(shared_info, &zone_),
+ zone_(shared_info->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(Handle<JSFunction> closure)
+ : CompilationInfo(closure, &zone_),
+ zone_(closure->GetIsolate()),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
+
+ private:
+ Zone zone_;
+ ZoneScope zone_scope_;
+};
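
// Editorial sketch: typical stack use of CompilationInfoWithZone, as in
// BuildFunctionInfo above. The embedded Zone plus its DELETE_ON_EXIT scope
// mean every zone allocation made during compilation is freed when `info`
// leaves scope. The function name here is illustrative.
static void CompilationInfoWithZoneUsageSketch(Handle<Script> script,
                                               FunctionLiteral* literal) {
  CompilationInfoWithZone info(script);
  info.SetFunction(literal);
  // ... run the compilation pipeline, allocating from info.zone() ...
}  // ~CompilationInfoWithZone: the zone and all its allocations are freed.
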
+
+
+// A wrapper around a CompilationInfo that detaches the Handles from
+// the underlying DeferredHandleScope and stores them in info_ on
+// destruction.
+class CompilationHandleScope BASE_EMBEDDED {
+ public:
+ explicit CompilationHandleScope(CompilationInfo* info)
+ : deferred_(info->isolate()), info_(info) {}
+ ~CompilationHandleScope() {
+ info_->set_deferred_handles(deferred_.Detach());
+ }
+
+ private:
+ DeferredHandleScope deferred_;
+ CompilationInfo* info_;
+};
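
// Editorial sketch: how Compiler::RecompileParallel (compiler.cc above)
// pairs this scope with CompilationInfo::SaveHandles(). Handles created
// inside the scope are detached into the info on scope exit, so a background
// optimizer thread can keep dereferencing them after the foreground
// HandleScope unwinds. The function name is illustrative.
static void CompilationHandleScopeUsageSketch(CompilationInfo* info) {
  CompilationHandleScope handle_scope(info);
  // ... parse, analyze scopes, build the Hydrogen graph ...
  info->SaveHandles();  // Re-create the key handles inside the deferred scope.
}  // ~CompilationHandleScope: info->set_deferred_handles(deferred_.Detach()).
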
+
+
+class HGraph;
+class HGraphBuilder;
+class LChunk;
+
+// A helper class that calls the three compilation phases in
+// Crankshaft and keeps track of its state. The three phases
+// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
+// fail, bail out to the full code generator, or succeed. Apart from
+// their return value, the status of the phase last run can be checked
+// using last_status().
+class OptimizingCompiler: public ZoneObject {
+ public:
+ explicit OptimizingCompiler(CompilationInfo* info)
+ : info_(info),
+ oracle_(NULL),
+ graph_builder_(NULL),
+ graph_(NULL),
+ chunk_(NULL),
+ time_taken_to_create_graph_(0),
+ time_taken_to_optimize_(0),
+ time_taken_to_codegen_(0),
+ last_status_(FAILED) { }
+
+ enum Status {
+ FAILED, BAILED_OUT, SUCCEEDED
+ };
+
+ MUST_USE_RESULT Status CreateGraph();
+ MUST_USE_RESULT Status OptimizeGraph();
+ MUST_USE_RESULT Status GenerateAndInstallCode();
+
+ Status last_status() const { return last_status_; }
+ CompilationInfo* info() const { return info_; }
+
+ MUST_USE_RESULT Status AbortOptimization() {
+ info_->AbortOptimization();
+ info_->shared_info()->DisableOptimization(info_->bailout_reason());
+ return SetLastStatus(BAILED_OUT);
+ }
+
+ private:
+ CompilationInfo* info_;
+ TypeFeedbackOracle* oracle_;
+ HGraphBuilder* graph_builder_;
+ HGraph* graph_;
+ LChunk* chunk_;
+ int64_t time_taken_to_create_graph_;
+ int64_t time_taken_to_optimize_;
+ int64_t time_taken_to_codegen_;
+ Status last_status_;
+
+ MUST_USE_RESULT Status SetLastStatus(Status status) {
+ last_status_ = status;
+ return last_status_;
+ }
+ void RecordOptimizationStats();
+
+ struct Timer {
+ Timer(OptimizingCompiler* compiler, int64_t* location)
+ : compiler_(compiler),
+ start_(OS::Ticks()),
+ location_(location) { }
+
+ ~Timer() {
+ *location_ += (OS::Ticks() - start_);
+ }
+
+ OptimizingCompiler* compiler_;
+ int64_t start_;
+ int64_t* location_;
+ };
+};
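
// Editorial sketch of the three-phase protocol documented above; compare
// Compiler::RecompileParallel and Compiler::InstallOptimizedCode earlier in
// this patch. Each later phase is only legal after the previous SUCCEEDED.
static void DriveOptimizingCompilerSketch(CompilationInfo* info) {
  OptimizingCompiler* compiler = new(info->zone()) OptimizingCompiler(info);
  if (compiler->CreateGraph() == OptimizingCompiler::SUCCEEDED &&
      compiler->OptimizeGraph() == OptimizingCompiler::SUCCEEDED &&
      compiler->GenerateAndInstallCode() == OptimizingCompiler::SUCCEEDED) {
    // Optimized code is now installed on the closure.
  } else if (compiler->last_status() == OptimizingCompiler::BAILED_OUT) {
    // Give up on Crankshaft and fall back to full-codegen output.
  } else {
    // FAILED: a real error (e.g. stack overflow) is pending on the isolate.
  }
}
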
+
+
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
@@ -279,10 +424,6 @@ class CompilationInfo BASE_EMBEDDED {
class Compiler : public AllStatic {
public:
- // Default maximum number of function optimization attempts before we
- // give up.
- static const int kDefaultMaxOptCount = 10;
-
static const int kMaxInliningLevels = 3;
// Call count before primitive functions trigger their own optimization.
@@ -297,6 +438,7 @@ class Compiler : public AllStatic {
Handle<Object> script_name,
int line_offset,
int column_offset,
+ Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@@ -315,6 +457,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
+ static void RecompileParallel(Handle<JSFunction> function);
+
// Compile a shared function info object (the function is possibly lazily
// compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
@@ -326,6 +470,8 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
+ static void InstallOptimizedCode(OptimizingCompiler* info);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif
diff --git a/src/3rdparty/v8/src/contexts.cc b/src/3rdparty/v8/src/contexts.cc
index cf07cc6..662e326 100644
--- a/src/3rdparty/v8/src/contexts.cc
+++ b/src/3rdparty/v8/src/contexts.cc
@@ -36,7 +36,7 @@ namespace internal {
Context* Context::declaration_context() {
Context* current = this;
- while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
+ while (!current->IsFunctionContext() && !current->IsNativeContext()) {
current = current->previous();
ASSERT(current->closure() == closure());
}
@@ -45,7 +45,7 @@ Context* Context::declaration_context() {
JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global();
+ GlobalObject* object = global_object();
if (object->IsJSGlobalObject()) {
return JSGlobalObject::cast(object)->builtins();
} else {
@@ -55,19 +55,19 @@ JSBuiltinsObject* Context::builtins() {
}
-Context* Context::global_context() {
+Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
// context.
- if (global()->IsGlobalObject()) {
- return global()->global_context();
+ if (global_object()->IsGlobalObject()) {
+ return global_object()->native_context();
}
// During bootstrapping, the global object might not be set and we
- // have to search the context chain to find the global context.
+ // have to search the context chain to find the native context.
ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
- while (!current->IsGlobalContext()) {
+ while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
current = Context::cast(closure->context());
}
@@ -76,11 +76,11 @@ Context* Context::global_context() {
JSObject* Context::global_proxy() {
- return global_context()->global_proxy_object();
+ return native_context()->global_proxy_object();
}
void Context::set_global_proxy(JSObject* object) {
- global_context()->set_global_proxy_object(object);
+ native_context()->set_global_proxy_object(object);
}
@@ -109,17 +109,17 @@ Handle<Object> Context::Lookup(Handle<String> name,
do {
if (FLAG_trace_contexts) {
PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
- if (context->IsGlobalContext()) PrintF(" (global context)");
+ if (context->IsNativeContext()) PrintF(" (native context)");
PrintF("\n");
}
- if (qml_global.is_null() && !context->qml_global()->IsUndefined()) {
- qml_global = Handle<JSObject>(context->qml_global(), isolate);
- qml_global_global = Handle<JSObject>(context->global(), isolate);
+ if (qml_global.is_null() && !context->qml_global_object()->IsUndefined()) {
+ qml_global = Handle<JSObject>(context->qml_global_object(), isolate);
+ qml_global_global = Handle<JSObject>(context->global_object(), isolate);
}
// 1. Check global objects, subjects of with, and extension objects.
- if (context->IsGlobalContext() ||
+ if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
@@ -234,7 +234,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsGlobalContext()) {
+ if (context->IsNativeContext()) {
follow_context_chain = false;
} else {
context = Handle<Context>(context->previous(), isolate);
@@ -276,19 +276,19 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
#ifdef DEBUG
- Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined()) {
- CHECK(element != function);
- element = JSFunction::cast(element)->next_function_link();
+ if (FLAG_enable_slow_asserts) {
+ Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+ while (!element->IsUndefined()) {
+ CHECK(element != function);
+ element = JSFunction::cast(element)->next_function_link();
+ }
}
- CHECK(function->next_function_link()->IsUndefined());
-
- // Check that the context belongs to the weak global contexts list.
+ // Check that the context belongs to the weak native contexts list.
bool found = false;
- Object* context = GetHeap()->global_contexts_list();
+ Object* context = GetHeap()->native_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
@@ -298,13 +298,23 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
CHECK(found);
#endif
+
+ // If the function link field is already used then the function was
+ // enqueued as a code flushing candidate and we remove it now.
+ if (!function->next_function_link()->IsUndefined()) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictCandidate(function);
+ }
+
+ ASSERT(function->next_function_link()->IsUndefined());
+
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}
void Context::RemoveOptimizedFunction(JSFunction* function) {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
JSFunction* prev = NULL;
while (!element->IsUndefined()) {
@@ -328,7 +338,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
Object* Context::OptimizedFunctionsListHead() {
- ASSERT(IsGlobalContext());
+ ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
@@ -338,11 +348,28 @@ void Context::ClearOptimizedFunctions() {
}
+Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
+ Handle<Object> result(error_message_for_code_gen_from_strings());
+ if (result->IsUndefined()) {
+ const char* error =
+ "Code generation from strings disallowed for this context";
+ Isolate* isolate = Isolate::Current();
+ result = isolate->factory()->NewStringFromAscii(i::CStrVector(error));
+ }
+ return result;
+}
+
+
#ifdef DEBUG
-bool Context::IsBootstrappingOrContext(Object* object) {
+bool Context::IsBootstrappingOrValidParentContext(
+ Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
+ if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+ if (!object->IsContext()) return false;
+ Context* context = Context::cast(object);
+ return context->IsNativeContext() || context->IsGlobalContext() ||
+ context->IsModuleContext() || !child->IsModuleContext();
}
diff --git a/src/3rdparty/v8/src/contexts.h b/src/3rdparty/v8/src/contexts.h
index 18f73d9..61e6c66 100644
--- a/src/3rdparty/v8/src/contexts.h
+++ b/src/3rdparty/v8/src/contexts.h
@@ -96,7 +96,7 @@ enum BindingFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define GLOBAL_CONTEXT_FIELDS(V) \
+#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
@@ -106,9 +106,7 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
- V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
- V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
- V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
+ V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -156,12 +154,16 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
+ error_message_for_code_gen_from_strings) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
- V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+ V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
+ V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
+ V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -192,16 +194,19 @@ enum BindingFlags {
// Dynamically declared variables/functions are also added
// to lazily allocated extension object. Context::Lookup
// searches the extension object for properties.
+// For global and block contexts, contains the respective
+// ScopeInfo.
+// For module contexts, points back to the respective JSModule.
//
-// [ global ] A pointer to the global object. Provided for quick
+// [ global_object ] A pointer to the global object. Provided for quick
// access to the global object from inside the code (since
// we always have a context pointer).
//
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the global context contains additional slots for fast access to
-// global properties.
+// Finally, the native context contains additional slots for fast access to
+// native properties.
class Context: public FixedArray {
public:
@@ -219,16 +224,16 @@ class Context: public FixedArray {
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
- // scope info (block contexts).
+ // scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
- QML_GLOBAL_INDEX,
- GLOBAL_INDEX,
+ QML_GLOBAL_OBJECT_INDEX,
+ GLOBAL_OBJECT_INDEX,
MIN_CONTEXT_SLOTS,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
- // These slots are only in global contexts.
+ // These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
@@ -249,9 +254,7 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
- SMI_JS_ARRAY_MAP_INDEX,
- DOUBLE_JS_ARRAY_MAP_INDEX,
- OBJECT_JS_ARRAY_MAP_INDEX,
+ JS_ARRAY_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@@ -283,11 +286,14 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+ ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
- PROXY_ENUMERATE,
+ PROXY_ENUMERATE_INDEX,
+ OBSERVERS_NOTIFY_CHANGE_INDEX,
+ OBSERVERS_DELIVER_CHANGES_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@@ -297,7 +303,7 @@ class Context: public FixedArray {
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
- GLOBAL_CONTEXT_SLOTS,
+ NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
};
@@ -308,7 +314,7 @@ class Context: public FixedArray {
Context* previous() {
Object* result = unchecked_previous();
- ASSERT(IsBootstrappingOrContext(result));
+ ASSERT(IsBootstrappingOrValidParentContext(result, this));
return reinterpret_cast<Context*>(result);
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
@@ -317,19 +323,28 @@ class Context: public FixedArray {
Object* extension() { return get(EXTENSION_INDEX); }
void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+ JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
+ void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
+
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
- GlobalObject* global() {
- Object* result = get(GLOBAL_INDEX);
+ GlobalObject* global_object() {
+ Object* result = get(GLOBAL_OBJECT_INDEX);
ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
- void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
+ void set_global_object(GlobalObject* object) {
+ set(GLOBAL_OBJECT_INDEX, object);
+ }
- JSObject *qml_global() { return reinterpret_cast<JSObject *>(get(QML_GLOBAL_INDEX)); }
- void set_qml_global(JSObject *qml_global) { set(QML_GLOBAL_INDEX, qml_global); }
+ JSObject* qml_global_object() {
+ return reinterpret_cast<JSObject *>(get(QML_GLOBAL_OBJECT_INDEX));
+ }
+ void set_qml_global_object(JSObject *qml_global) {
+ set(QML_GLOBAL_OBJECT_INDEX, qml_global);
+ }
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
@@ -338,11 +353,11 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
- // Compute the global context by traversing the context chain.
- Context* global_context();
+ // Compute the native context by traversing the context chain.
+ Context* native_context();
- // Predicates for context types. IsGlobalContext is defined on Object
- // because we frequently have to know if arbitrary objects are global
+ // Predicates for context types. IsNativeContext is defined on Object
+ // because we frequently have to know if arbitrary objects are native
// contexts.
bool IsFunctionContext() {
Map* map = this->map();
@@ -364,42 +379,36 @@ class Context: public FixedArray {
Map* map = this->map();
return map == map->GetHeap()->module_context_map();
}
+ bool IsGlobalContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->global_context_map();
+ }
- // Tells whether the global context is marked with out of memory.
+ // Tells whether the native context is marked with out of memory.
inline bool has_out_of_memory();
- // Mark the global context with out of memory.
+ // Mark the native context with out of memory.
inline void mark_out_of_memory();
- // A global context hold a list of all functions which have been optimized.
+ // A native context holds a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
- static int GetContextMapIndexFromElementsKind(
- ElementsKind elements_kind) {
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
- } else if (elements_kind == FAST_ELEMENTS) {
- return Context::OBJECT_JS_ARRAY_MAP_INDEX;
- } else {
- ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
- return Context::SMI_JS_ARRAY_MAP_INDEX;
- }
- }
+ Handle<Object> ErrorMessageForCodeGenerationFromStrings();
-#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
- ASSERT(IsGlobalContext()); \
+ ASSERT(IsNativeContext()); \
set(index, value); \
} \
type* name() { \
- ASSERT(IsGlobalContext()); \
+ ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
}
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
+#undef NATIVE_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
// There are three possibilities:
@@ -429,7 +438,7 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
+ static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.
typedef FixedBodyDescriptor<
@@ -446,7 +455,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
- static bool IsBootstrappingOrContext(Object* object);
+ static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
};
diff --git a/src/3rdparty/v8/src/conversions-inl.h b/src/3rdparty/v8/src/conversions-inl.h
index 77b260f..e272fe6 100644
--- a/src/3rdparty/v8/src/conversions-inl.h
+++ b/src/3rdparty/v8/src/conversions-inl.h
@@ -51,6 +51,11 @@ inline double JunkStringValue() {
}
+inline double SignedZero(bool negative) {
+ return negative ? uint64_to_double(Double::kSignMask) : 0.0;
+}
+
+
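
// Editorial sketch: SignedZero now builds -0.0 from the IEEE-754 sign bit
// (Double::kSignMask) rather than the -0.0 literal used by the conversions.h
// version removed below. A standalone equivalent (uint64_t from <stdint.h>,
// memcpy from <string.h>):
static double SignedZeroSketch(bool negative) {
  const uint64_t kSignMask = static_cast<uint64_t>(1) << 63;  // sign bit
  uint64_t bits = negative ? kSignMask : 0;
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;  // SignedZeroSketch(true) == -0.0, so 1.0 / result < 0.
}
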
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
@@ -263,6 +268,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
if (radix == 0) {
// Radix detection.
+ radix = 10;
if (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
@@ -271,11 +277,8 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
++current;
if (current == end) return JunkStringValue();
} else {
- radix = 8;
leading_zero = true;
}
- } else {
- radix = 10;
}
} else if (radix == 16) {
if (*current == '0') {
@@ -459,16 +462,23 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
- bool negative = false;
+ enum Sign {
+ NONE,
+ NEGATIVE,
+ POSITIVE
+ };
+
+ Sign sign = NONE;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
+ sign = POSITIVE;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
- negative = true;
+ sign = NEGATIVE;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -483,34 +493,34 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
ASSERT(buffer_pos == 0);
- return negative ? -V8_INFINITY : V8_INFINITY;
+ return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end || !isDigit(*current, 16)) {
+ if (current == end || !isDigit(*current, 16) || sign != NONE) {
return JunkStringValue(); // "0x".
}
return InternalStringToIntDouble<4>(unicode_cache,
current,
end,
- negative,
+ false,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
}
}
@@ -555,7 +565,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(negative);
+ if (current == end) return SignedZero(sign == NEGATIVE);
exponent--; // Move this 0 into the exponent.
}
}
@@ -647,7 +657,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
return InternalStringToIntDouble<3>(unicode_cache,
buffer,
buffer + buffer_pos,
- negative,
+ sign == NEGATIVE,
allow_trailing_junk);
}
@@ -660,7 +670,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return negative ? -converted : converted;
+ return (sign == NEGATIVE) ? -converted : converted;
}
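
// Editorial worked examples for the Sign change above (with ALLOW_HEX set):
//   "0x10"      -> 16.0
//   "-0x10"     -> JunkStringValue() (a NaN); an explicit sign now
//                  disqualifies hex input, where the old code returned -16.0
//   "-0"        -> -0.0, via SignedZero(sign == NEGATIVE)
//   "+Infinity" -> V8_INFINITY
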
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/conversions.h b/src/3rdparty/v8/src/conversions.h
index 70559c9..1fbb5f1 100644
--- a/src/3rdparty/v8/src/conversions.h
+++ b/src/3rdparty/v8/src/conversions.h
@@ -52,8 +52,13 @@ inline bool isDigit(int x, int radix) {
}
-inline double SignedZero(bool negative) {
- return negative ? -0.0 : 0.0;
+// The fast double-to-(unsigned-)int conversion routine does not guarantee
+// rounding towards zero.
+// For NaN and values outside the int range, return INT_MIN or INT_MAX.
+inline int FastD2IChecked(double x) {
+ if (!(x >= INT_MIN)) return INT_MIN; // Negation to catch NaNs.
+ if (x > INT_MAX) return INT_MAX;
+ return static_cast<int>(x);
}
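
// Editorial sketch: the saturating semantics of FastD2IChecked, restated as
// a standalone function with example results (INT_MIN/INT_MAX from <limits.h>).
static int FastD2ICheckedSketch(double x) {
  if (!(x >= INT_MIN)) return INT_MIN;  // NaN fails every comparison.
  if (x > INT_MAX) return INT_MAX;
  return static_cast<int>(x);           // In range: truncates toward zero.
}
// FastD2ICheckedSketch(NAN)  == INT_MIN
// FastD2ICheckedSketch(1e99) == INT_MAX
// FastD2ICheckedSketch(-3.9) == -3
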
@@ -62,8 +67,6 @@ inline double SignedZero(bool negative) {
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
inline int FastD2I(double x) {
- // The static_cast convertion from double to int used to be slow, but
- // as new benchmarks show, now it is much faster than lrint().
return static_cast<int>(x);
}
diff --git a/src/3rdparty/v8/src/counters.cc b/src/3rdparty/v8/src/counters.cc
index faad6d4..811c0aa 100644
--- a/src/3rdparty/v8/src/counters.cc
+++ b/src/3rdparty/v8/src/counters.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -64,9 +64,20 @@ void StatsCounterTimer::Stop() {
counter_.Increment(milliseconds);
}
+void Histogram::AddSample(int sample) {
+ if (Enabled()) {
+ Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
+ }
+}
+
+void* Histogram::CreateHistogram() const {
+ return Isolate::Current()->stats_table()->
+ CreateHistogram(name_, min_, max_, num_buckets_);
+}
+
// Start the timer.
void HistogramTimer::Start() {
- if (GetHistogram() != NULL) {
+ if (histogram_.Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@@ -74,20 +85,13 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
- if (histogram_ != NULL) {
+ if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- Isolate::Current()->stats_table()->
- AddHistogramSample(histogram_, milliseconds);
+ histogram_.AddSample(milliseconds);
}
}
-
-void* HistogramTimer::CreateHistogram() const {
- return Isolate::Current()->stats_table()->
- CreateHistogram(name_, 0, 10000, 50);
-}
-
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/counters.h b/src/3rdparty/v8/src/counters.h
index 6498a02..577280f 100644
--- a/src/3rdparty/v8/src/counters.h
+++ b/src/3rdparty/v8/src/counters.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -169,8 +169,7 @@ struct StatsCounter {
protected:
// Returns the cached address of this counter location.
int* GetPtr() {
- if (lookup_done_)
- return ptr_;
+ if (lookup_done_) return ptr_;
lookup_done_ = true;
ptr_ = FindLocationInStatsTable();
return ptr_;
@@ -199,25 +198,30 @@ struct StatsCounterTimer {
}
};
-// A HistogramTimer allows distributions of results to be created
-// HistogramTimer t = { L"foo", NULL, false, 0, 0 };
-struct HistogramTimer {
+// A Histogram represents a dynamically created histogram in the StatsTable.
+//
+// This class is designed to be POD initialized. It will be registered with
+// the histogram system on first use. For example:
+// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
+struct Histogram {
const char* name_;
+ int min_;
+ int max_;
+ int num_buckets_;
void* histogram_;
bool lookup_done_;
- int64_t start_time_;
- int64_t stop_time_;
-
- // Start the timer.
- void Start();
+ // Add a single sample to this histogram.
+ void AddSample(int sample);
- // Stop the timer and record the results.
- void Stop();
+ // Returns true if this histogram is enabled.
+ bool Enabled() {
+ return GetHistogram() != NULL;
+ }
- // Returns true if the timer is running.
- bool Running() {
- return (histogram_ != NULL) && (start_time_ != 0) && (stop_time_ == 0);
+ // Reset the cached internal pointer.
+ void Reset() {
+ lookup_done_ = false;
}
protected:
@@ -234,6 +238,30 @@ struct HistogramTimer {
void* CreateHistogram() const;
};
+// A HistogramTimer allows distributions of results to be created
+// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 };
+struct HistogramTimer {
+ Histogram histogram_;
+
+ int64_t start_time_;
+ int64_t stop_time_;
+
+ // Start the timer.
+ void Start();
+
+ // Stop the timer and record the results.
+ void Stop();
+
+ // Returns true if the timer is running.
+ bool Running() {
+ return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ }
+
+ void Reset() {
+ histogram_.Reset();
+ }
+};
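
// Editorial sketch: POD initialization and use of the reworked structs, per
// the comments above. The histogram names are illustrative.
static void HistogramUsageSketch() {
  Histogram h = { "myhist", 0, 10000, 50, NULL, false };
  h.AddSample(42);  // No-op unless the StatsTable can create the histogram.

  HistogramTimer t = { {"parse", 0, 10000, 50, NULL, false}, 0, 0 };
  t.Start();
  // ... timed work ...
  t.Stop();  // Records elapsed milliseconds iff the histogram is Enabled().
}
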
+
// Helper class for scoping a HistogramTimer.
class HistogramTimerScope BASE_EMBEDDED {
public:
diff --git a/src/3rdparty/v8/src/cpu-profiler.h b/src/3rdparty/v8/src/cpu-profiler.h
index 6e2e771..9cd4484 100644
--- a/src/3rdparty/v8/src/cpu-profiler.h
+++ b/src/3rdparty/v8/src/cpu-profiler.h
@@ -188,7 +188,7 @@ class ProfilerEventsProcessor : public Thread {
#define PROFILE(isolate, Call) \
- LOG(isolate, Call); \
+ LOG_CODE_EVENT(isolate, Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
v8::internal::CpuProfiler::Call; \
diff --git a/src/3rdparty/v8/src/d8.cc b/src/3rdparty/v8/src/d8.cc
index ddd4100..b3b1bb8 100644
--- a/src/3rdparty/v8/src/d8.cc
+++ b/src/3rdparty/v8/src/d8.cc
@@ -200,7 +200,13 @@ Handle<Value> Shell::Write(const Arguments& args) {
if (i != 0) {
printf(" ");
}
- v8::String::Utf8Value str(args[i]);
+
+ // Explicitly catch potential exceptions in toString().
+ v8::TryCatch try_catch;
+ Handle<String> str_obj = args[i]->ToString();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ v8::String::Utf8Value str(str_obj);
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
if (n != str.length()) {
printf("Error in fwrite\n");
@@ -284,9 +290,9 @@ Handle<Value> Shell::Load(const Arguments& args) {
return Undefined();
}
-static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
- if (value_in->IsUint32()) {
- return value_in->Uint32Value();
+static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
+ if (value_in->IsInt32()) {
+ return value_in->Int32Value();
}
Local<Value> number = value_in->ToNumber();
@@ -296,7 +302,15 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
Local<Int32> int32 = number->ToInt32();
if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
- int32_t raw_value = int32->Int32Value();
+ int32_t value = int32->Int32Value();
+ if (try_catch->HasCaught()) return 0;
+
+ return value;
+}
+
+
+static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
+ int32_t raw_value = convertToInt(value_in, try_catch);
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
@@ -312,182 +326,463 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ThrowException(
String::New("Array length exceeds maximum length."));
}
- return static_cast<size_t>(raw_value);
+ return raw_value;
}
-const char kArrayBufferMarkerPropName[] = "_is_array_buffer_";
-const char kArrayBufferReferencePropName[] = "_array_buffer_ref_";
+// TODO(rossberg): should replace these by proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-static const int kExternalArrayAllocationHeaderSize = 2;
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- size_t element_size) {
- TryCatch try_catch;
- bool is_array_buffer_construct = element_size == 0;
- if (is_array_buffer_construct) {
- type = v8::kExternalByteArray;
- element_size = 1;
+Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
+ int32_t length) {
+ static const int32_t kMaxSize = 0x7fffffff;
+ // Make sure the total size fits into a (signed) int.
+ if (length < 0 || length > kMaxSize) {
+ return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+ }
+ uint8_t* data = new uint8_t[length];
+ if (data == NULL) {
+ return ThrowException(String::New("Memory allocation failed"));
}
- ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
- element_size == 8);
+ memset(data, 0, length);
+
+ buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
+ Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
+ persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+ persistent_array.MarkIndependent();
+ V8::AdjustAmountOfExternalAllocatedMemory(length);
+
+ buffer->SetIndexedPropertiesToExternalArrayData(
+ data, v8::kExternalByteArray, length);
+ buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
+
+ return buffer;
+}
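
// Editorial note: the backing store's lifetime is tied to the weak handle.
// When `persistent_array` becomes otherwise unreachable, the GC invokes
// ExternalArrayWeakCallback (its updated body is outside this hunk), which
// presumably mirrors the old callback removed further below, roughly:
//
//   delete[] static_cast<uint8_t*>(data);               // new uint8_t[length]
//   V8::AdjustAmountOfExternalAllocatedMemory(-length); // undo the credit
//   object.Dispose();
//
// MarkIndependent() additionally lets the scavenger reclaim the wrapper
// without waiting for a full mark-sweep.
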
+
+
+Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
+ if (!args.IsConstructCall()) {
+ Handle<Value>* rec_args = new Handle<Value>[args.Length()];
+ for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
+ Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
+ delete[] rec_args;
+ return result;
+ }
+
if (args.Length() == 0) {
return ThrowException(
- String::New("Array constructor must have at least one "
- "parameter."));
- }
- bool first_arg_is_array_buffer =
- args[0]->IsObject() &&
- args[0]->ToObject()->Get(
- String::New(kArrayBufferMarkerPropName))->IsTrue();
- // Currently, only the following constructors are supported:
+ String::New("ArrayBuffer constructor must have one argument"));
+ }
+ TryCatch try_catch;
+ int32_t length = convertToUint(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ return CreateExternalArrayBuffer(args.This(), length);
+}
+
+
+Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
+ Handle<Object> buffer,
+ ExternalArrayType type,
+ int32_t length,
+ int32_t byteLength,
+ int32_t byteOffset,
+ int32_t element_size) {
+ ASSERT(element_size == 1 || element_size == 2 ||
+ element_size == 4 || element_size == 8);
+ ASSERT(byteLength == length * element_size);
+
+ void* data = buffer->GetIndexedPropertiesExternalArrayData();
+ ASSERT(data != NULL);
+
+ array->SetIndexedPropertiesToExternalArrayData(
+ static_cast<uint8_t*>(data) + byteOffset, type, length);
+ array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
+ array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
+ array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
+ array->Set(String::New("length"), Int32::New(length), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ array->Set(String::New("buffer"), buffer, ReadOnly);
+
+ return array;
+}
+
+
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ int32_t element_size) {
+ if (!args.IsConstructCall()) {
+ Handle<Value>* rec_args = new Handle<Value>[args.Length()];
+ for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
+ Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
+ delete[] rec_args;
+ return result;
+ }
+
+ TryCatch try_catch;
+ ASSERT(element_size == 1 || element_size == 2 ||
+ element_size == 4 || element_size == 8);
+
+ // All of the following constructors are supported:
// TypedArray(unsigned long length)
+ // TypedArray(type[] array)
+ // TypedArray(TypedArray array)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
- if (args.Length() > 3) {
+ Handle<Object> buffer;
+ int32_t length;
+ int32_t byteLength;
+ int32_t byteOffset;
+ bool init_from_array = false;
+ if (args.Length() == 0) {
return ThrowException(
- String::New("Array constructor from ArrayBuffer must "
- "have 1-3 parameters."));
- }
-
- Local<Value> length_value = (args.Length() < 3)
- ? (first_arg_is_array_buffer
- ? args[0]->ToObject()->Get(String::New("byteLength"))
- : args[0])
- : args[2];
- size_t byteLength = convertToUint(length_value, &try_catch);
- size_t length = byteLength;
- if (try_catch.HasCaught()) return try_catch.Exception();
-
- void* data = NULL;
- size_t offset = 0;
-
- Handle<Object> array = Object::New();
- if (first_arg_is_array_buffer) {
- Handle<Object> derived_from = args[0]->ToObject();
- data = derived_from->GetIndexedPropertiesExternalArrayData();
-
- size_t array_buffer_length = convertToUint(
- derived_from->Get(String::New("byteLength")),
- &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
-
- if (data == NULL && array_buffer_length != 0) {
- return ThrowException(
- String::New("ArrayBuffer doesn't have data"));
+ String::New("Array constructor must have at least one argument"));
+ }
+ if (args[0]->IsObject() &&
+ !args[0]->ToObject()->GetHiddenValue(
+ String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+ // Construct from ArrayBuffer.
+ buffer = args[0]->ToObject();
+ int32_t bufferLength =
+ convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ byteOffset = 0;
+ } else {
+ byteOffset = convertToUint(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (byteOffset > bufferLength) {
+ return ThrowException(String::New("byteOffset out of bounds"));
+ }
+ if (byteOffset % element_size != 0) {
+ return ThrowException(
+ String::New("byteOffset must be multiple of element size"));
+ }
}
- if (args.Length() > 1) {
- offset = convertToUint(args[1], &try_catch);
- if (try_catch.HasCaught()) return try_catch.Exception();
-
- // The given byteOffset must be a multiple of the element size of the
- // specific type, otherwise an exception is raised.
- if (offset % element_size != 0) {
+ if (args.Length() < 3 || args[2]->IsUndefined()) {
+ byteLength = bufferLength - byteOffset;
+ length = byteLength / element_size;
+ if (byteLength % element_size != 0) {
return ThrowException(
- String::New("offset must be multiple of element_size"));
+ String::New("buffer size must be multiple of element size"));
+ }
+ } else {
+ length = convertToUint(args[2], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ byteLength = length * element_size;
+ if (byteOffset + byteLength > bufferLength) {
+ return ThrowException(String::New("length out of bounds"));
}
}
-
- if (offset > array_buffer_length) {
- return ThrowException(
- String::New("byteOffset must be less than ArrayBuffer length."));
+ } else {
+ if (args[0]->IsObject() &&
+ args[0]->ToObject()->Has(String::New("length"))) {
+ // Construct from array.
+ length = convertToUint(
+ args[0]->ToObject()->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ init_from_array = true;
+ } else {
+ // Construct from size.
+ length = convertToUint(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
}
+ byteLength = length * element_size;
+ byteOffset = 0;
+
+ Handle<Object> global = Context::GetCurrent()->Global();
+ Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
+ ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
+ Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
+ Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
+ 1, buffer_args);
+ if (try_catch.HasCaught()) return result;
+ buffer = result->ToObject();
+ }
- if (args.Length() == 2) {
- // If length is not explicitly specified, the length of the ArrayBuffer
- // minus the byteOffset must be a multiple of the element size of the
- // specific type, or an exception is raised.
- length = array_buffer_length - offset;
- }
+ Handle<Object> array = CreateExternalArray(
+ args.This(), buffer, type, length, byteLength, byteOffset, element_size);
- if (args.Length() != 3) {
- if (length % element_size != 0) {
- return ThrowException(
- String::New("ArrayBuffer length minus the byteOffset must be a "
- "multiple of the element size"));
- }
- length /= element_size;
- }
+ if (init_from_array) {
+ Handle<Object> init = args[0]->ToObject();
+ for (int i = 0; i < length; ++i) array->Set(i, init->Get(i));
+ }
+
+ return array;
+}
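
// Editorial sketch: the byteOffset/length derivation implemented above,
// factored into a standalone helper. It returns false exactly where the code
// above throws; all names are illustrative.
struct TypedArrayLayoutSketch { int32_t byteOffset, length, byteLength; };

static bool DeriveLayoutSketch(int32_t bufferLength, int32_t element_size,
                               bool has_offset, int32_t offset_arg,
                               bool has_length, int32_t length_arg,
                               TypedArrayLayoutSketch* out) {
  out->byteOffset = has_offset ? offset_arg : 0;
  if (out->byteOffset > bufferLength) return false;        // out of bounds
  if (out->byteOffset % element_size != 0) return false;   // misaligned
  if (!has_length) {
    out->byteLength = bufferLength - out->byteOffset;
    if (out->byteLength % element_size != 0) return false; // ragged tail
    out->length = out->byteLength / element_size;
  } else {
    out->length = length_arg;
    out->byteLength = length_arg * element_size;
    if (out->byteOffset + out->byteLength > bufferLength) return false;
  }
  return true;
}
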
- // If a given byteOffset and length references an area beyond the end of
- // the ArrayBuffer an exception is raised.
- if (offset + (length * element_size) > array_buffer_length) {
- return ThrowException(
- String::New("length references an area beyond the end of the "
- "ArrayBuffer"));
- }
- // Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
- array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
+Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'slice' invoked on non-object receiver"));
}
- if (is_array_buffer_construct) {
- array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
+ Local<Object> self = args.This();
+ Local<Value> marker =
+ self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'slice' invoked on wrong receiver type"));
}
- Persistent<Object> persistent_array = Persistent<Object>::New(array);
- if (data == NULL && length != 0) {
- // Make sure the total size fits into a (signed) int.
- static const int kMaxSize = 0x7fffffff;
- if (length > (kMaxSize - sizeof(size_t)) / element_size) {
- return ThrowException(String::New("Array exceeds maximum size (2G)"));
- }
- // Prepend the size of the allocated chunk to the data itself.
- int total_size = length * element_size +
- kExternalArrayAllocationHeaderSize * sizeof(size_t);
- data = malloc(total_size);
- if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed."));
- }
- *reinterpret_cast<size_t*>(data) = total_size;
- data = reinterpret_cast<size_t*>(data) + kExternalArrayAllocationHeaderSize;
- memset(data, 0, length * element_size);
- V8::AdjustAmountOfExternalAllocatedMemory(total_size);
+ int32_t length =
+ convertToUint(self->Get(String::New("byteLength")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'slice' must have at least one argument"));
+ }
+ int32_t begin = convertToInt(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (begin < 0) begin += length;
+ if (begin < 0) begin = 0;
+ if (begin > length) begin = length;
+
+ int32_t end;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ end = length;
+ } else {
+ end = convertToInt(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (end < 0) end += length;
+ if (end < 0) end = 0;
+ if (end > length) end = length;
+ if (end < begin) end = begin;
}
- persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_array.MarkIndependent();
- array->SetIndexedPropertiesToExternalArrayData(
- reinterpret_cast<uint8_t*>(data) + offset, type,
- static_cast<int>(length));
- array->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(byteLength)), ReadOnly);
- if (!is_array_buffer_construct) {
- array->Set(String::New("length"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
- array->Set(String::New("byteOffset"),
- Int32::New(static_cast<int32_t>(offset)), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"),
- Int32::New(static_cast<int32_t>(element_size)));
- // We currently support 'buffer' property only if constructed from a buffer.
- if (first_arg_is_array_buffer) {
- array->Set(String::New("buffer"), args[0], ReadOnly);
- }
+ Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
+ Handle<Value> new_args[] = { Uint32::New(end - begin) };
+ Handle<Value> result = constructor->NewInstance(1, new_args);
+ if (try_catch.HasCaught()) return result;
+ Handle<Object> buffer = result->ToObject();
+ uint8_t* dest =
+ static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
+ uint8_t* src = begin + static_cast<uint8_t*>(
+ self->GetIndexedPropertiesExternalArrayData());
+ memcpy(dest, src, end - begin);
+
+ return buffer;
+}
+
+
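// Editorial sketch: the begin/end clamping in ArrayBufferSlice above follows
// Array.prototype.slice conventions. Standalone form:
static void ClampSliceRangeSketch(int32_t length, int32_t begin_arg,
                                  bool has_end, int32_t end_arg,
                                  int32_t* begin, int32_t* end) {
  int32_t b = begin_arg;
  if (b < 0) b += length;   // Negative indices count back from the end.
  if (b < 0) b = 0;
  if (b > length) b = length;
  int32_t e = length;
  if (has_end) {
    e = end_arg;
    if (e < 0) e += length;
    if (e < 0) e = 0;
    if (e > length) e = length;
    if (e < b) e = b;       // Yields an empty slice, never a negative size.
  }
  *begin = b;
  *end = e;
}
// For a 16-byte buffer: slice(-4) copies bytes [12,16); slice(4, 2) is empty.
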
+Handle<Value> Shell::ArraySubArray(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'subarray' invoked on non-object receiver"));
}
- return array;
+
+ Local<Object> self = args.This();
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'subarray' invoked on wrong receiver type"));
+ }
+
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t length =
+ convertToUint(self->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t element_size =
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'subarray' must have at least one argument"));
+ }
+ int32_t begin = convertToInt(args[0], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (begin < 0) begin += length;
+ if (begin < 0) begin = 0;
+ if (begin > length) begin = length;
+
+ int32_t end;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ end = length;
+ } else {
+ end = convertToInt(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ if (end < 0) end += length;
+ if (end < 0) end = 0;
+ if (end > length) end = length;
+ if (end < begin) end = begin;
+ }
+
+ length = end - begin;
+ byteOffset += begin * element_size;
+
+ Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
+ Handle<Value> construct_args[] = {
+ buffer, Uint32::New(byteOffset), Uint32::New(length)
+ };
+ return constructor->NewInstance(3, construct_args);
}
-void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
- HandleScope scope;
- Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
- Handle<Object> converted_object = object->ToObject();
- Local<Value> prop_value = converted_object->Get(prop_name);
- if (data != NULL && !prop_value->IsObject()) {
- data = reinterpret_cast<size_t*>(data) - kExternalArrayAllocationHeaderSize;
- V8::AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int>(*reinterpret_cast<size_t*>(data)));
- free(data);
+Handle<Value> Shell::ArraySet(const Arguments& args) {
+ TryCatch try_catch;
+
+ if (!args.This()->IsObject()) {
+ return ThrowException(
+ String::New("'set' invoked on non-object receiver"));
}
- object.Dispose();
+
+ Local<Object> self = args.This();
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ if (marker.IsEmpty()) {
+ return ThrowException(
+ String::New("'set' invoked on wrong receiver type"));
+ }
+ int32_t length =
+ convertToUint(self->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t element_size =
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("'set' must have at least one argument"));
+ }
+ if (!args[0]->IsObject() ||
+ !args[0]->ToObject()->Has(String::New("length"))) {
+ return ThrowException(
+ String::New("'set' invoked with non-array argument"));
+ }
+ Handle<Object> source = args[0]->ToObject();
+ int32_t source_length =
+ convertToUint(source->Get(String::New("length")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ int32_t offset;
+ if (args.Length() < 2 || args[1]->IsUndefined()) {
+ offset = 0;
+ } else {
+ offset = convertToUint(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+ if (offset + source_length > length) {
+ return ThrowException(String::New("offset or source length out of bounds"));
+ }
+
+ int32_t source_element_size;
+ if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
+ source_element_size = 0;
+ } else {
+ source_element_size =
+ convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+
+ if (element_size == source_element_size &&
+ self->GetConstructor()->StrictEquals(source->GetConstructor())) {
+ // Use memmove on the array buffers.
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ Handle<Object> source_buffer =
+ source->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t source_byteOffset =
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
+ buffer->GetIndexedPropertiesExternalArrayData());
+ uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
+ source_buffer->GetIndexedPropertiesExternalArrayData());
+ memmove(dest, src, source_length * element_size);
+ } else if (source_element_size == 0) {
+ // Source is not a typed array, copy element-wise sequentially.
+ for (int i = 0; i < source_length; ++i) {
+ self->Set(offset + i, source->Get(i));
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ }
+ } else {
+ // Need to copy element-wise to make the right conversions.
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ Handle<Object> source_buffer =
+ source->Get(String::New("buffer"))->ToObject();
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ if (buffer->StrictEquals(source_buffer)) {
+ // Same backing store, need to handle overlap correctly.
+ // This gets a bit tricky in the case of different element sizes
+ // (which, of course, is extremely unlikely to ever occur in practice).
+ int32_t byteOffset =
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ int32_t source_byteOffset =
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+
+ // Copy as much as we can from left to right.
+ int i = 0;
+ int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
+ int32_t next_src_offset = source_byteOffset + source_element_size;
+ while (i < length && next_dest_offset <= next_src_offset) {
+ self->Set(offset + i, source->Get(i));
+ ++i;
+ next_dest_offset += element_size;
+ next_src_offset += source_element_size;
+ }
+ // Of what's left, copy as much as we can from right to left.
+ int j = length - 1;
+ int32_t dest_offset = byteOffset + (offset + j) * element_size;
+ int32_t src_offset = source_byteOffset + j * source_element_size;
+ while (j >= i && dest_offset >= src_offset) {
+ self->Set(offset + j, source->Get(j));
+ --j;
+ dest_offset -= element_size;
+ src_offset -= source_element_size;
+ }
+ // There can be at most 8 entries left in the middle that need buffering
+ // (because the largest element_size is 8 times the smallest).
+      ASSERT(j + 1 - i <= 8);
+ Handle<Value> temp[8];
+ for (int k = i; k <= j; ++k) {
+ temp[k - i] = source->Get(k);
+ }
+ for (int k = i; k <= j; ++k) {
+ self->Set(offset + k, temp[k - i]);
+ }
+ } else {
+ // Different backing stores, safe to copy element-wise sequentially.
+ for (int i = 0; i < source_length; ++i)
+ self->Set(offset + i, source->Get(i));
+ }
+ }
+
+ return Undefined();
}
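
ArraySet above picks one of three copy strategies; a sketch of each from the shell (illustration only):

  var buf = new ArrayBuffer(8);
  var bytes = new Uint8Array(buf);        // 8 one-byte elements
  var words = new Uint16Array(buf);       // 4 two-byte elements, same buffer
  bytes.set([1, 2, 3, 4]);                // plain-array source: element-wise loop
  words.set(new Uint16Array([5, 6]), 2);  // same type and constructor: memmove
  bytes.set(words.subarray(0, 2), 1);     // same backing store, different
                                          // element sizes: the overlap-aware
                                          // two-pass copy above
  // bytes.set([0], 8);                   // would throw: offset + source
                                          // length (9) exceeds length (8)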
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
- return CreateExternalArray(args, v8::kExternalByteArray, 0);
+void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
+ HandleScope scope;
+ int32_t length =
+ object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
+ V8::AdjustAmountOfExternalAllocatedMemory(-length);
+ delete[] static_cast<uint8_t*>(data);
+ object.Dispose();
}
@@ -507,8 +802,8 @@ Handle<Value> Shell::Int16Array(const Arguments& args) {
Handle<Value> Shell::Uint16Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalUnsignedShortArray,
- sizeof(uint16_t));
+ return CreateExternalArray(
+ args, kExternalUnsignedShortArray, sizeof(uint16_t));
}
@@ -523,18 +818,18 @@ Handle<Value> Shell::Uint32Array(const Arguments& args) {
Handle<Value> Shell::Float32Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalFloatArray,
- sizeof(float)); // NOLINT
+ return CreateExternalArray(
+ args, kExternalFloatArray, sizeof(float)); // NOLINT
}
Handle<Value> Shell::Float64Array(const Arguments& args) {
- return CreateExternalArray(args, kExternalDoubleArray,
- sizeof(double)); // NOLINT
+ return CreateExternalArray(
+ args, kExternalDoubleArray, sizeof(double)); // NOLINT
}
-Handle<Value> Shell::PixelArray(const Arguments& args) {
+Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
}
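
The rename from PixelArray to Uint8ClampedArray tracks the typed-array spec name; the clamping comes from the kExternalPixelArray store semantics:

  var c = new Uint8ClampedArray(3);
  c[0] = -5;             // clamped to 0
  c[1] = 300;            // clamped to 255
  c[2] = 1.5;            // rounded to nearest rather than truncated
  print(c[0], c[1]);     // 0 255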
@@ -764,7 +1059,7 @@ void Shell::InstallUtilityScript() {
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug->debug_context()->global());
+ = i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
@@ -829,13 +1124,32 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
};
#endif
+
+Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
+ InvocationCallback fun) {
+ Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
+ Local<Template> proto_template = buffer_template->PrototypeTemplate();
+ proto_template->Set(String::New("slice"),
+ FunctionTemplate::New(ArrayBufferSlice));
+ return buffer_template;
+}
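
CreateArrayBufferTemplate installs 'slice' on the ArrayBuffer prototype (the ArrayBufferSlice callback itself is outside this hunk). A usage sketch, assuming the usual copying slice semantics:

  var ab = new ArrayBuffer(16);
  var tail = ab.slice(12);      // bytes 12..15 of 'ab'
  var mid = ab.slice(4, -8);    // a negative end counts from byteLength: 4..7
  print(tail.byteLength, mid.byteLength);   // 4 4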
+
+
+Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
+ Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
+ Local<Template> proto_template = array_template->PrototypeTemplate();
+ proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
+ proto_template->Set(String::New("subarray"),
+ FunctionTemplate::New(ArraySubArray));
+ return array_template;
+}
+
+
Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readbinary"),
- FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readbuffer"),
FunctionTemplate::New(ReadBuffer));
global_template->Set(String::New("readline"),
@@ -849,26 +1163,28 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
FunctionTemplate::New(DisableProfiler));
// Bind the handlers for external arrays.
+ PropertyAttribute attr =
+ static_cast<PropertyAttribute>(ReadOnly | DontDelete);
global_template->Set(String::New("ArrayBuffer"),
- FunctionTemplate::New(ArrayBuffer));
+ CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
- FunctionTemplate::New(Int8Array));
+ CreateArrayTemplate(Int8Array), attr);
global_template->Set(String::New("Uint8Array"),
- FunctionTemplate::New(Uint8Array));
+ CreateArrayTemplate(Uint8Array), attr);
global_template->Set(String::New("Int16Array"),
- FunctionTemplate::New(Int16Array));
+ CreateArrayTemplate(Int16Array), attr);
global_template->Set(String::New("Uint16Array"),
- FunctionTemplate::New(Uint16Array));
+ CreateArrayTemplate(Uint16Array), attr);
global_template->Set(String::New("Int32Array"),
- FunctionTemplate::New(Int32Array));
+ CreateArrayTemplate(Int32Array), attr);
global_template->Set(String::New("Uint32Array"),
- FunctionTemplate::New(Uint32Array));
+ CreateArrayTemplate(Uint32Array), attr);
global_template->Set(String::New("Float32Array"),
- FunctionTemplate::New(Float32Array));
+ CreateArrayTemplate(Float32Array), attr);
global_template->Set(String::New("Float64Array"),
- FunctionTemplate::New(Float64Array));
- global_template->Set(String::New("PixelArray"),
- FunctionTemplate::New(PixelArray));
+ CreateArrayTemplate(Float64Array), attr);
+ global_template->Set(String::New("Uint8ClampedArray"),
+ CreateArrayTemplate(Uint8ClampedArray), attr);
#ifdef LIVE_OBJECT_LIST
global_template->Set(String::New("lol_is_enabled"), True());
@@ -901,7 +1217,7 @@ void Shell::Initialize() {
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
+ if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
@@ -991,20 +1307,24 @@ void Shell::OnExit() {
counters[j].key = i.CurrentKey();
}
qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
- printf("+--------------------------------------------+-------------+\n");
- printf("| Name | Value |\n");
- printf("+--------------------------------------------+-------------+\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
+ printf("| Name |"
+ " Value |\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
for (j = 0; j < number_of_counters; j++) {
Counter* counter = counters[j].counter;
const char* key = counters[j].key;
if (counter->is_histogram()) {
- printf("| c:%-40s | %11i |\n", key, counter->count());
- printf("| t:%-40s | %11i |\n", key, counter->sample_total());
+ printf("| c:%-60s | %11i |\n", key, counter->count());
+ printf("| t:%-60s | %11i |\n", key, counter->sample_total());
} else {
- printf("| %-42s | %11i |\n", key, counter->count());
+ printf("| %-62s | %11i |\n", key, counter->count());
}
}
- printf("+--------------------------------------------+-------------+\n");
+ printf("+----------------------------------------------------------------+"
+ "-------------+\n");
delete [] counters;
}
delete counters_file_;
@@ -1056,45 +1376,29 @@ static char* ReadChars(const char* name, int* size_out) {
}
-Handle<Value> Shell::ReadBinary(const Arguments& args) {
- String::Utf8Value filename(args[0]);
- int size;
- if (*filename == NULL) {
- return ThrowException(String::New("Error loading file"));
- }
- char* chars = ReadChars(*filename, &size);
- if (chars == NULL) {
- return ThrowException(String::New("Error reading file"));
- }
- // We skip checking the string for UTF8 characters and use it raw as
- // backing store for the external string with 8-bit characters.
- BinaryResource* resource = new BinaryResource(chars, size);
- return String::NewExternal(resource);
-}
-
-
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+ ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
- char* data = ReadChars(*filename, &length);
+
+ uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == NULL) {
return ThrowException(String::New("Error reading file"));
}
-
Handle<Object> buffer = Object::New();
- buffer->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
-
+ buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
+ V8::AdjustAmountOfExternalAllocatedMemory(length);
buffer->SetIndexedPropertiesToExternalArrayData(
- reinterpret_cast<uint8_t*>(data), kExternalUnsignedByteArray, length);
+ data, kExternalUnsignedByteArray, length);
buffer->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
+ Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
}
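
What ReadBuffer produces, seen from the shell ("data.bin" is a hypothetical file): the bytes are exposed through indexed external-array access, byteLength mirrors the file size, and the weak callback above releases the storage and the external-memory accounting when the buffer dies.

  var buf = readbuffer("data.bin");   // hidden kArrayBufferMarkerPropName set
  print(buf.byteLength);              // file size in bytes (read-only)
  print(buf[0]);                      // first byte, via the external array data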
@@ -1259,7 +1563,7 @@ void SourceGroup::Execute() {
Handle<String> SourceGroup::ReadFile(const char* name) {
int size;
- const char* chars = ReadChars(name, &size);
+ char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;
@@ -1291,6 +1595,11 @@ void SourceGroup::ExecuteInThread() {
Execute();
}
context.Dispose();
+ if (Shell::options.send_idle_notification) {
+ const int kLongIdlePauseInMs = 1000;
+ V8::ContextDisposedNotification();
+ V8::IdleNotification(kLongIdlePauseInMs);
+ }
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!Shell::options.last_run);
@@ -1336,6 +1645,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--test") == 0) {
options.test_shell = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--send-idle-notification") == 0) {
+ options.send_idle_notification = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--preemption") == 0) {
#ifdef V8_SHARED
printf("D8 with shared library does not support multi-threading\n");
@@ -1492,13 +1804,11 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (!options.last_run) {
context.Dispose();
-#if !defined(V8_SHARED)
- if (i::FLAG_send_idle_notification) {
+ if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
-#endif // !V8_SHARED
}
#ifndef V8_SHARED
diff --git a/src/3rdparty/v8/src/d8.h b/src/3rdparty/v8/src/d8.h
index 23fdebc..a62a81f 100644
--- a/src/3rdparty/v8/src/d8.h
+++ b/src/3rdparty/v8/src/d8.h
@@ -31,7 +31,7 @@
#ifndef V8_SHARED
#include "allocation.h"
#include "hashmap.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "v8.h"
#else
#include "../include/v8.h"
@@ -67,7 +67,7 @@ class CounterCollection {
CounterCollection();
Counter* GetNextCounter();
private:
- static const unsigned kMaxCounters = 256;
+ static const unsigned kMaxCounters = 512;
uint32_t magic_number_;
uint32_t max_counters_;
uint32_t max_name_size_;
@@ -227,6 +227,7 @@ class ShellOptions {
#endif // V8_SHARED
script_executed(false),
last_run(true),
+ send_idle_notification(false),
stress_opt(false),
stress_deopt(false),
interactive_shell(false),
@@ -249,6 +250,7 @@ class ShellOptions {
#endif // V8_SHARED
bool script_executed;
bool last_run;
+ bool send_idle_notification;
bool stress_opt;
bool stress_deopt;
bool interactive_shell;
@@ -307,7 +309,6 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadBinary(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
@@ -323,7 +324,10 @@ class Shell : public i::AllStatic {
static Handle<Value> Uint32Array(const Arguments& args);
static Handle<Value> Float32Array(const Arguments& args);
static Handle<Value> Float64Array(const Arguments& args);
- static Handle<Value> PixelArray(const Arguments& args);
+ static Handle<Value> Uint8ClampedArray(const Arguments& args);
+ static Handle<Value> ArrayBufferSlice(const Arguments& args);
+ static Handle<Value> ArraySubArray(const Arguments& args);
+ static Handle<Value> ArraySet(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -384,9 +388,20 @@ class Shell : public i::AllStatic {
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
+ static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
+ static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
+ static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
+ int32_t size);
+ static Handle<Object> CreateExternalArray(Handle<Object> array,
+ Handle<Object> buffer,
+ ExternalArrayType type,
+ int32_t length,
+ int32_t byteLength,
+ int32_t byteOffset,
+ int32_t element_size);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
- size_t element_size);
+ int32_t element_size);
static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
};
diff --git a/src/3rdparty/v8/src/date.js b/src/3rdparty/v8/src/date.js
index d0e24ab..a54cb23 100644
--- a/src/3rdparty/v8/src/date.js
+++ b/src/3rdparty/v8/src/date.js
@@ -318,7 +318,6 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
- CHECK_DATE(this);
  var t = UTC_DATE_VALUE(this);
  if (NUMBER_IS_NAN(t)) return kInvalidDate;
  var time_zone_string = LocalTimezoneString(this);
@@ -328,7 +327,6 @@ function DateToString() {
// ECMA 262 - 15.9.5.3
function DateToDateString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(this);
@@ -337,7 +335,6 @@ function DateToDateString() {
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this);
@@ -353,7 +350,6 @@ function DateToLocaleString() {
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(this);
@@ -362,7 +358,6 @@ function DateToLocaleDateString() {
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return TimeString(this);
@@ -371,133 +366,114 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
- CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
- CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.10
function DateGetFullYear() {
- CHECK_DATE(this);
return LOCAL_YEAR(this);
}
// ECMA 262 - 15.9.5.11
function DateGetUTCFullYear() {
- CHECK_DATE(this);
return UTC_YEAR(this);
}
// ECMA 262 - 15.9.5.12
function DateGetMonth() {
- CHECK_DATE(this);
return LOCAL_MONTH(this);
}
// ECMA 262 - 15.9.5.13
function DateGetUTCMonth() {
- CHECK_DATE(this);
return UTC_MONTH(this);
}
// ECMA 262 - 15.9.5.14
function DateGetDate() {
- CHECK_DATE(this);
return LOCAL_DAY(this);
}
// ECMA 262 - 15.9.5.15
function DateGetUTCDate() {
- CHECK_DATE(this);
return UTC_DAY(this);
}
// ECMA 262 - 15.9.5.16
function DateGetDay() {
- CHECK_DATE(this);
return LOCAL_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
- CHECK_DATE(this);
return UTC_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.18
function DateGetHours() {
- CHECK_DATE(this);
return LOCAL_HOUR(this);
}
// ECMA 262 - 15.9.5.19
function DateGetUTCHours() {
- CHECK_DATE(this);
return UTC_HOUR(this);
}
// ECMA 262 - 15.9.5.20
function DateGetMinutes() {
- CHECK_DATE(this);
return LOCAL_MIN(this);
}
// ECMA 262 - 15.9.5.21
function DateGetUTCMinutes() {
- CHECK_DATE(this);
return UTC_MIN(this);
}
// ECMA 262 - 15.9.5.22
function DateGetSeconds() {
- CHECK_DATE(this);
return LOCAL_SEC(this);
}
// ECMA 262 - 15.9.5.23
function DateGetUTCSeconds() {
- CHECK_DATE(this);
  return UTC_SEC(this);
}
// ECMA 262 - 15.9.5.24
function DateGetMilliseconds() {
- CHECK_DATE(this);
return LOCAL_MS(this);
}
// ECMA 262 - 15.9.5.25
function DateGetUTCMilliseconds() {
- CHECK_DATE(this);
return UTC_MS(this);
}
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
- CHECK_DATE(this);
return TIMEZONE_OFFSET(this);
}
@@ -512,7 +488,6 @@ function DateSetTime(ms) {
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
@@ -522,7 +497,6 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(UTC_HOUR(this),
@@ -535,7 +509,6 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
@@ -546,7 +519,6 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
@@ -557,7 +529,6 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@@ -570,7 +541,6 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@@ -583,7 +553,6 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@@ -597,7 +566,6 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@@ -611,7 +579,6 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
@@ -621,7 +588,6 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
@@ -631,7 +597,6 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
@@ -642,7 +607,6 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
@@ -653,7 +617,6 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
- CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@@ -674,7 +637,6 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@@ -695,7 +657,6 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
@@ -709,7 +670,6 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
- CHECK_DATE(this);
return LOCAL_YEAR(this) - 1900;
}
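
For reference, the B.2.4 getYear above is simply the 1900-offset local year:

  var d = new Date(2012, 2, 19);          // March 19, 2012 (local time)
  print(d.getYear());                     // 112
  print(d.getFullYear() - 1900);          // 112 as well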
@@ -757,7 +717,6 @@ function PadInt(n, digits) {
// ECMA 262 - 15.9.5.43
function DateToISOString() {
- CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
var year = this.getUTCFullYear();
diff --git a/src/3rdparty/v8/src/dateparser-inl.h b/src/3rdparty/v8/src/dateparser-inl.h
index 32f0f9e..3cb36fa 100644
--- a/src/3rdparty/v8/src/dateparser-inl.h
+++ b/src/3rdparty/v8/src/dateparser-inl.h
@@ -62,7 +62,8 @@ bool DateParser::Parse(Vector<Char> str,
// sss is in the range 000..999,
// hh is in the range 00..23,
// mm, ss, and sss default to 00 if missing, and
- // timezone defaults to Z if missing.
+ // timezone defaults to Z if missing
+ // (following Safari, ISO actually demands local time).
// Extensions:
// We also allow sss to have more or less than three digits (but at
// least one).
@@ -148,6 +149,9 @@ bool DateParser::Parse(Vector<Char> str,
} else {
// Garbage words are illegal if a number has been read.
if (has_read_number) return false;
+ // The first number has to be separated from garbage words by
+ // whitespace or other separators.
+ if (scanner.Peek().IsNumber()) return false;
}
} else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).
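
The clarified comment above is observable from script: an ISO date-time without a zone designator is read as UTC (following Safari), although ISO 8601 itself specifies local time. A sketch:

  var implicit = Date.parse("2012-03-19T09:00:00");    // no zone designator
  var explicit = Date.parse("2012-03-19T09:00:00Z");   // explicit UTC
  print(implicit == explicit);   // true under this parser: missing zone == Z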
diff --git a/src/3rdparty/v8/src/debug-agent.cc b/src/3rdparty/v8/src/debug-agent.cc
index 10c0053..e856222 100644
--- a/src/3rdparty/v8/src/debug-agent.cc
+++ b/src/3rdparty/v8/src/debug-agent.cc
@@ -157,7 +157,9 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
ScopedLock with(session_access_);
ASSERT(session == session_);
if (session == session_) {
- CloseSession();
+ session_->Shutdown();
+ delete session_;
+ session_ = NULL;
}
}
@@ -397,7 +399,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
uint16_t character = message[i];
buffer_position +=
unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
- ASSERT(buffer_position < kBufferSize);
+ ASSERT(buffer_position <= kBufferSize);
// Send buffer if full or last character is encoded.
if (kBufferSize - buffer_position <
diff --git a/src/3rdparty/v8/src/debug-debugger.js b/src/3rdparty/v8/src/debug-debugger.js
index 5cdbf14..796d6aa 100644
--- a/src/3rdparty/v8/src/debug-debugger.js
+++ b/src/3rdparty/v8/src/debug-debugger.js
@@ -1454,6 +1454,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
+ } else if (request.command == 'restartframe') {
+ this.restartFrameRequest_(request, response);
} else if (request.command == 'flags') {
this.debuggerFlagsRequest_(request, response);
} else if (request.command == 'v8flags') {
@@ -2081,7 +2083,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Global evaluate.
if (global) {
- // Evaluate in the global context.
+ // Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
expression, Boolean(disable_break), additional_context_object);
return;
@@ -2363,9 +2365,6 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
DebugCommandProcessor.prototype.changeLiveRequest_ = function(
request, response) {
- if (!Debug.LiveEdit) {
- return response.failed('LiveEdit feature is not supported');
- }
if (!request.arguments) {
return response.failed('Missing arguments');
}
@@ -2403,6 +2402,37 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
};
+DebugCommandProcessor.prototype.restartFrameRequest_ = function(
+ request, response) {
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+ var frame = request.arguments.frame;
+
+ // No frames to evaluate in frame.
+ if (this.exec_state_.frameCount() == 0) {
+ return response.failed('No frames');
+ }
+
+ var frame_mirror;
+ // Check whether a frame was specified.
+ if (!IS_UNDEFINED(frame)) {
+ var frame_number = %ToNumber(frame);
+ if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+ return response.failed('Invalid frame "' + frame + '"');
+ }
+ // Restart specified frame.
+ frame_mirror = this.exec_state_.frame(frame_number);
+ } else {
+ // Restart selected frame.
+ frame_mirror = this.exec_state_.frame();
+ }
+
+ var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
+ response.body = {result: result_description};
+};
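
The new 'restartframe' handler accepts the same request envelope as the other DebugCommandProcessor commands; the sequence number below is made up for illustration:

  var request = {
    seq: 117,                      // hypothetical sequence number
    type: "request",
    command: "restartframe",
    arguments: { frame: 0 }        // optional; defaults to the selected frame
  };
  // On success, response.body is { result: <Debug.LiveEdit.RestartFrame(...)> };
  // otherwise the handler fails with 'No frames' or 'Invalid frame "<n>"'.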
+
+
DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
response) {
// Check for legal request.
diff --git a/src/3rdparty/v8/src/debug.cc b/src/3rdparty/v8/src/debug.cc
index 9efb5c3..ec25acc 100644
--- a/src/3rdparty/v8/src/debug.cc
+++ b/src/3rdparty/v8/src/debug.cc
@@ -97,8 +97,8 @@ static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
// Isolate::context() may have been NULL when "script collected" event
  // occurred.
if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> global_context(context->global_context());
- return v8::Utils::ToLocal(global_context);
+ Handle<Context> native_context(context->native_context());
+ return v8::Utils::ToLocal(native_context);
}
@@ -261,8 +261,12 @@ void BreakLocationIterator::Reset() {
// Create relocation iterators for the two code objects.
if (reloc_iterator_ != NULL) delete reloc_iterator_;
if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(debug_info_->code());
- reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
+ reloc_iterator_ = new RelocIterator(
+ debug_info_->code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
+ reloc_iterator_original_ = new RelocIterator(
+ debug_info_->original_code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
// Position at the first break point.
break_point_ = -1;
@@ -698,7 +702,7 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
- // Runtime::FindSharedFunctionInfoInScript.
+ // Debug::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
debug->RemoveDebugInfo(node->debug_info());
@@ -745,12 +749,15 @@ bool Debug::CompileDebuggerScript(int index) {
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> script_name = factory->NewStringFromAscii(name);
+ Handle<Context> context = isolate->native_context();
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::Compile(source_code,
script_name,
- 0, 0, NULL, NULL,
+ 0, 0,
+ context,
+ NULL, NULL,
Handle<String>::null(),
NATIVES_CODE);
@@ -762,13 +769,12 @@ bool Debug::CompileDebuggerScript(int index) {
}
// Execute the shared function in the debugger context.
- Handle<Context> context = isolate->global_context();
bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
- Execution::TryCall(function, Handle<Object>(context->global()),
+ Execution::TryCall(function, Handle<Object>(context->global_object()),
0, NULL, &caught_exception);
// Check for caught exceptions.
@@ -829,7 +835,7 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
- Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
+ Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
@@ -892,16 +898,6 @@ void Debug::Iterate(ObjectVisitor* v) {
}
-void Debug::PutValuesOnStackAndDie(int start,
- Address c_entry_fp,
- Address last_fp,
- Address larger_fp,
- int count,
- int end) {
- OS::Abort();
-}
-
-
Object* Debug::Break(Arguments args) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
@@ -999,41 +995,16 @@ Object* Debug::Break(Arguments args) {
it.Advance();
}
- // Catch the cases that would lead to crashes and capture
- // - C entry FP at which to start stack crawl.
- // - FP of the frame at which we plan to stop stepping out (last FP).
- // - current FP that's larger than last FP.
- // - Counter for the number of steps to step out.
- if (it.done()) {
- // We crawled the entire stack, never reaching last_fp_.
- PutValuesOnStackAndDie(0xBEEEEEEE,
- frame->fp(),
- thread_local_.last_fp_,
- NULL,
- count,
- 0xFEEEEEEE);
- } else if (it.frame()->fp() != thread_local_.last_fp_) {
- // We crawled over last_fp_, without getting a match.
- PutValuesOnStackAndDie(0xBEEEEEEE,
- frame->fp(),
- thread_local_.last_fp_,
- it.frame()->fp(),
- count,
- 0xFEEEEEEE);
+ // Check that we indeed found the frame we are looking for.
+ CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
+ if (step_count > 1) {
+ // Save old count and action to continue stepping after StepOut.
+ thread_local_.queued_step_count_ = step_count - 1;
}
- // If we found original frame
- if (it.frame()->fp() == thread_local_.last_fp_) {
- if (step_count > 1) {
- // Save old count and action to continue stepping after
- // StepOut
- thread_local_.queued_step_count_ = step_count - 1;
- }
-
- // Set up for StepOut to reach target frame
- step_action = StepOut;
- step_count = count;
- }
+ // Set up for StepOut to reach target frame.
+ step_action = StepOut;
+ step_count = count;
}
// Clear all current stepping setup.
@@ -1130,7 +1101,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
- debug_context()->global()->GetPropertyNoExceptionThrown(
+ debug_context()->global_object()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
@@ -1170,14 +1141,16 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
}
-void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
+void Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
- if (!EnsureDebugInfo(shared)) {
+ // Make sure the function is compiled and has set up the debug info.
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if retrieving debug info failed.
return;
}
@@ -1198,6 +1171,50 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
}
+bool Debug::SetBreakPointForScript(Handle<Script> script,
+ Handle<Object> break_point_object,
+ int* source_position) {
+ HandleScope scope(isolate_);
+
+ PrepareForBreakPoints();
+
+ // Obtain shared function info for the function.
+ Object* result = FindSharedFunctionInfoInScript(script, *source_position);
+ if (result->IsUndefined()) return false;
+
+ // Make sure the function has set up the debug info.
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+ if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
+ // Return if retrieving debug info failed.
+ return false;
+ }
+
+ // Find position within function. The script position might be before the
+ // source position of the first function.
+ int position;
+ if (shared->start_position() > *source_position) {
+ position = 0;
+ } else {
+ position = *source_position - shared->start_position();
+ }
+
+ Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  // Source positions start at zero.
+ ASSERT(position >= 0);
+
+ // Find the break point and change it.
+ BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+ it.FindBreakLocationFromPosition(position);
+ it.SetBreakPoint(break_point_object);
+
+ *source_position = it.position() + shared->start_position();
+
+ // At least one active break point now.
+ ASSERT(debug_info->GetBreakPointCount() > 0);
+ return true;
+}
+
+
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
HandleScope scope(isolate_);
@@ -1249,10 +1266,12 @@ void Debug::ClearAllBreakPoints() {
}
-void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+void Debug::FloodWithOneShot(Handle<JSFunction> function) {
PrepareForBreakPoints();
- // Make sure the function has set up the debug info.
- if (!EnsureDebugInfo(shared)) {
+
+ // Make sure the function is compiled and has set up the debug info.
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@@ -1272,8 +1291,8 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
- Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
- Debug::FloodWithOneShot(shared_info);
+ Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
+ Debug::FloodWithOneShot(bindee_function);
}
}
@@ -1288,11 +1307,9 @@ void Debug::FloodHandlerWithOneShot() {
for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(
- JSFunction::cast(frame->function())->shared());
      // Flood the function containing the catch block with break points.
- FloodWithOneShot(shared);
+ JSFunction* function = JSFunction::cast(frame->function());
+ FloodWithOneShot(Handle<JSFunction>(function));
return;
}
}
@@ -1359,14 +1376,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
return;
}
@@ -1436,7 +1453,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
- FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
}
@@ -1446,21 +1463,19 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Step next or step min.
// Fill the current function with one-shot break points.
- FloodWithOneShot(shared);
+ FloodWithOneShot(function);
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
} else {
// If there's restarter frame on top of the stack, just get the pointer
// to function which is going to be restarted.
if (is_at_restarted_function) {
Handle<JSFunction> restarted_function(
JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- Handle<SharedFunctionInfo> restarted_shared(
- restarted_function->shared());
- FloodWithOneShot(restarted_shared);
+ FloodWithOneShot(restarted_function);
} else if (!call_function_stub.is_null()) {
// If it's CallFunction stub ensure target function is compiled and flood
// it with one shot breakpoints.
@@ -1502,7 +1517,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
} else if (!js_function->IsBuiltin()) {
// Don't step into builtins.
// It will also compile target function if it's not compiled yet.
- FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
+ FloodWithOneShot(js_function);
}
}
}
@@ -1511,7 +1526,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// a call target as the function called might be a native function for
// which step in will not stop. It also prepares for stepping in
// getters/setters.
- FloodWithOneShot(shared);
+ FloodWithOneShot(function);
if (is_load_or_store) {
// Remember source position and frame to handle step in getter/setter. If
@@ -1520,7 +1535,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// propagated on the next Debug::Break.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
- thread_local_.last_fp_ = frame->fp();
+ thread_local_.last_fp_ = frame->UnpaddedFP();
}
// Step in or Step in min
@@ -1555,7 +1570,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
break_location_iterator->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->fp() &&
+ return thread_local_.last_fp_ == frame->UnpaddedFP() &&
thread_local_.last_statement_position_ == current_statement_position;
}
@@ -1711,12 +1726,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function.
if (!holder.is_null() && holder->IsJSFunction() &&
!JSFunction::cast(*holder)->IsBuiltin()) {
- Handle<SharedFunctionInfo> shared_info(
- JSFunction::cast(*holder)->shared());
- Debug::FloodWithOneShot(shared_info);
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
+ Debug::FloodWithOneShot(js_function);
}
} else {
- Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ Debug::FloodWithOneShot(function);
}
}
}
@@ -1756,7 +1770,7 @@ void Debug::ClearOneShot() {
void Debug::ActivateStepIn(StackFrame* frame) {
ASSERT(!StepOutActive());
- thread_local_.step_into_fp_ = frame->fp();
+ thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
@@ -1767,7 +1781,7 @@ void Debug::ClearStepIn() {
void Debug::ActivateStepOut(StackFrame* frame) {
ASSERT(!StepInActive());
- thread_local_.step_out_fp_ = frame->fp();
+ thread_local_.step_out_fp_ = frame->UnpaddedFP();
}
@@ -1784,20 +1798,19 @@ void Debug::ClearStepNext() {
// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization
-// information. Deoptimization information is required in case that an
-// optimized version of this function is still activated on the
-// stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version - that is flags which can
-// change the code generated. The current method of mapping from
-// already compiled full code without debug break slots to full code
-// with debug break slots depends on the generated code is otherwise
-// exactly the same.
-static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case an optimized version of this function
+// is still activated on the stack. It will also make sure that the full
+// code is compiled with the same flags as the previous version, that is,
+// flags which can change the generated code. The current method of
+// mapping from already compiled full code without debug break slots to
+// full code with debug break slots depends on the generated code being
+// otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
- CompilationInfo info(shared);
+ CompilationInfoWithZone info(function);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
@@ -1809,7 +1822,7 @@ static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
- Handle<Code> new_code(shared->code());
+ Handle<Code> new_code(function->shared()->code());
ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable());
@@ -1869,29 +1882,48 @@ static void RedirectActivationsToRecompiledCodeOnThread(
continue;
}
- intptr_t delta = frame->pc() - frame_code->instruction_start();
- int debug_break_slot_count = 0;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+  // Iterate over the RelocInfo in the original code to compute the sum of
+  // the constant pool sizes. (See Assembler::CheckConstPool().)
+ // Note that this is only useful for architectures using constant pools.
+ int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
+ int frame_const_pool_size = 0;
+ for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->pc() >= frame->pc()) break;
+ frame_const_pool_size += static_cast<int>(info->data());
+ }
+ intptr_t frame_offset =
+ frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
+
+ // Iterate over the RelocInfo for new code to find the number of bytes
+ // generated for debug slots and constant pools.
+ int debug_break_slot_bytes = 0;
+ int new_code_const_pool_size = 0;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::CONST_POOL);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
- intptr_t new_delta =
- info->pc() -
- new_code->instruction_start() -
- debug_break_slot_bytes;
- if (new_delta > delta) {
+ intptr_t new_offset = info->pc() - new_code->instruction_start() -
+ new_code_const_pool_size - debug_break_slot_bytes;
+ if (new_offset >= frame_offset) {
break;
}
- // Passed a debug break slot in the full code with debug
- // break slots.
- debug_break_slot_count++;
+ if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
+ debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
+ } else {
+ ASSERT(RelocInfo::IsConstPool(info->rmode()));
+ // The size of the constant pool is encoded in the data.
+ new_code_const_pool_size += static_cast<int>(info->data());
+ }
}
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+
+ // Compute the equivalent pc in the new code.
+ byte* new_pc = new_code->instruction_start() + frame_offset +
+ debug_break_slot_bytes + new_code_const_pool_size;
+
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
"with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -1908,14 +1940,12 @@ static void RedirectActivationsToRecompiledCodeOnThread(
new_code->instruction_size(),
new_code->instruction_size(),
reinterpret_cast<intptr_t>(frame->pc()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- delta + debug_break_slot_bytes);
+ reinterpret_cast<intptr_t>(new_pc));
}
// Patch the return address to return into the code with
// debug break slots.
- frame->set_pc(
- new_code->instruction_start() + delta + debug_break_slot_bytes);
+ frame->set_pc(new_pc);
}
}
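
A worked example of the pc translation above, with made-up numbers (the debug break slot size is architecture-specific; 4 bytes is assumed here):

  var pc_delta = 100;                  // frame->pc() - instruction_start()
  var frame_const_pool_size = 8;       // constant pool bytes before the pc
  var frame_offset = pc_delta - frame_const_pool_size;        // 92
  var debug_break_slot_bytes = 2 * 4;  // two slots seen before offset 92
  var new_code_const_pool_size = 8;    // constant pool bytes in the new code
  var new_pc_offset = frame_offset + debug_break_slot_bytes
                    + new_code_const_pool_size;               // 108
  // i.e. frame->set_pc(new_code->instruction_start() + new_pc_offset)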
@@ -1957,6 +1987,9 @@ void Debug::PrepareForBreakPoints() {
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
@@ -2013,6 +2046,7 @@ void Debug::PrepareForBreakPoints() {
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
+ Handle<SharedFunctionInfo> shared(function->shared());
if (function->code()->kind() == Code::FUNCTION &&
function->code()->has_debug_break_slots()) {
@@ -2020,7 +2054,6 @@ void Debug::PrepareForBreakPoints() {
continue;
}
- Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
@@ -2034,13 +2067,12 @@ void Debug::PrepareForBreakPoints() {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
Handle<Code> current_code(function->shared()->code());
- ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
- CompileFullCodeForDebugging(shared, current_code);
+ CompileFullCodeForDebugging(function, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
@@ -2063,16 +2095,130 @@ void Debug::PrepareForBreakPoints() {
}
+Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
+ int position) {
+  // Iterate the heap looking for SharedFunctionInfo generated from the
+  // script. The innermost SharedFunctionInfo containing the source position
+  // for the requested break point is found.
+  // NOTE: This might require several heap iterations. If the
+  // SharedFunctionInfo found is not compiled, it is compiled and the heap
+  // is iterated again, as the compilation might create inner functions
+  // from the newly compiled function and the actual requested break point
+  // might be in one of these functions.
+  // NOTE: The fix-point iteration below depends on all functions that
+  // cannot be compiled lazily without a context not being compiled at all.
+  // Compilation will be triggered at points where we do not need a context.
+ bool done = false;
+ // The current candidate for the source position:
+ int target_start_position = RelocInfo::kNoPosition;
+ Handle<JSFunction> target_function;
+ Handle<SharedFunctionInfo> target;
+ while (!done) {
+ { // Extra scope for iterator and no-allocation.
+ isolate_->heap()->EnsureHeapIsIterable();
+ AssertNoAllocation no_alloc_during_heap_iteration;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
+ bool found_next_candidate = false;
+ Handle<JSFunction> function;
+ Handle<SharedFunctionInfo> shared;
+ if (obj->IsJSFunction()) {
+ function = Handle<JSFunction>(JSFunction::cast(obj));
+ shared = Handle<SharedFunctionInfo>(function->shared());
+ ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
+ found_next_candidate = true;
+ } else if (obj->IsSharedFunctionInfo()) {
+ shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+ // Skip functions that we cannot compile lazily without a context,
+ // which is not available here, because there is no closure.
+ found_next_candidate = shared->is_compiled() ||
+ shared->allows_lazy_compilation_without_context();
+ }
+ if (!found_next_candidate) continue;
+ if (shared->script() == *script) {
+ // If the SharedFunctionInfo found has the requested script data and
+ // contains the source position it is a candidate.
+ int start_position = shared->function_token_position();
+ if (start_position == RelocInfo::kNoPosition) {
+ start_position = shared->start_position();
+ }
+ if (start_position <= position &&
+ position <= shared->end_position()) {
+ // If there is no candidate or this function is within the current
+ // candidate this is the new candidate.
+ if (target.is_null()) {
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ } else {
+ if (target_start_position == start_position &&
+ shared->end_position() == target->end_position()) {
+                  // If a top-level function contains only one function
+                  // declaration, the source for the top-level and the function
+                  // is the same. In that case prefer the non-top-level function.
+ if (!shared->is_toplevel()) {
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ }
+ } else if (target_start_position <= start_position &&
+ shared->end_position() <= target->end_position()) {
+ // This containment check includes equality as a function
+ // inside a top-level function can share either start or end
+ // position with the top-level function.
+ target_start_position = start_position;
+ target_function = function;
+ target = shared;
+ }
+ }
+ }
+ }
+ } // End for loop.
+ } // End no-allocation scope.
+
+ if (target.is_null()) {
+ return isolate_->heap()->undefined_value();
+ }
+
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
+ // If the candidate found is compiled we are done.
+ done = target->is_compiled();
+ if (!done) {
+ // If the candidate is not compiled, compile it to reveal any inner
+ // functions which might contain the requested source position. This
+ // will compile all inner functions that cannot be compiled without a
+ // context, because Compiler::BuildFunctionInfo checks whether the
+ // debugger is active.
+ if (target_function.is_null()) {
+ SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
+ } else {
+ JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
+ }
+ }
+ } // End while loop.
+
+ return *target;
+}
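
The containment rules above, applied to a concrete script (illustration only): the innermost candidate whose start..end range covers the requested position wins, and compiling it may reveal further inner candidates for the next heap walk.

  function outer() {      // candidate: covers every position below
    function inner() {    // candidate: contained inside outer
      return 1;           // a break position on this line resolves to 'inner',
    }                     // the innermost enclosing SharedFunctionInfo
    return inner();
  }
  // If only 'outer' is compiled on the first iteration, compiling it creates
  // 'inner', and the while (!done) loop walks the heap again to find it.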
+
+
// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> function) {
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
return true;
}
- // Ensure shared in compiled. Return false if this failed.
- if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+ // There will be at least one break point when we are done.
+ has_break_points_ = true;
+
+ // Ensure function is compiled. Return false if this failed.
+ if (!function.is_null() &&
+ !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -2084,9 +2230,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
node->set_next(debug_info_list_);
debug_info_list_ = node;
- // Now there is at least one break point.
- has_break_points_ = true;
-
return true;
}
@@ -2128,9 +2271,9 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@@ -2146,7 +2289,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Find the call address in the running code. This address holds the call to
// either a DebugBreakXXX or to the debug break return entry code if the
// break point is still active after processing the break point.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS exit or debug break slot.
bool at_js_return = false;
@@ -2220,9 +2363,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
- if (!EnsureDebugInfo(shared)) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return false;
}
@@ -2235,7 +2378,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
#endif
// Find the call address in the running code.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS return.
RelocIterator it(debug_info->code());
@@ -2253,7 +2396,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode,
Object** restarter_frame_function_pointer) {
- thread_local_.frame_drop_mode_ = mode;
+ if (mode != CURRENTLY_SET_MODE) {
+ thread_local_.frame_drop_mode_ = mode;
+ }
thread_local_.break_frame_id_ = new_break_frame_id;
thread_local_.restarter_frame_function_pointer_ =
restarter_frame_function_pointer;
@@ -2268,7 +2413,7 @@ const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) {
- return IsLoaded() && global == debug_context()->global();
+ return IsLoaded() && global == debug_context()->global_object();
}
@@ -2280,12 +2425,13 @@ void Debug::ClearMirrorCache() {
// Clear the mirror cache.
Handle<String> function_name =
isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
- Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
+ Handle<Object> fun(
+ Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
- Handle<JSObject>(Debug::debug_context()->global()),
+ Handle<JSObject>(Debug::debug_context()->global_object()),
0, NULL, &caught_exception);
}
@@ -2372,6 +2518,7 @@ Debugger::Debugger(Isolate* isolate)
event_listener_data_(Handle<Object>()),
compiling_natives_(false),
is_loading_debugger_(false),
+ live_edit_enabled_(true),
never_unload_debugger_(false),
force_debugger_active_(false),
message_handler_(NULL),
@@ -2407,7 +2554,8 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<String> constructor_str =
isolate_->factory()->LookupSymbol(constructor_name);
Handle<Object> constructor(
- isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(
+ *constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@@ -2415,7 +2563,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
- Handle<JSObject>(isolate_->debug()->debug_context()->global()),
+ Handle<JSObject>(isolate_->debug()->debug_context()->global_object()),
argc,
argv,
caught_exception);
@@ -2637,7 +2785,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<String> update_script_break_points_symbol =
isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
- Handle<Object>(debug->debug_context()->global()->
+ Handle<Object>(debug->debug_context()->global_object()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
@@ -2682,6 +2830,7 @@ void Debugger::OnScriptCollected(int id) {
HandleScope scope(isolate_);
// No more to do if not debugging.
+ if (isolate_->debug()->InDebugger()) return;
if (!IsDebuggerActive()) return;
if (!Debugger::EventActive(v8::ScriptCollected)) return;
@@ -2793,7 +2942,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
event_listener_data_ };
bool caught_exception;
Execution::TryCall(fun,
- isolate_->global(),
+ isolate_->global_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
diff --git a/src/3rdparty/v8/src/debug.h b/src/3rdparty/v8/src/debug.h
index d9c966c..150e29e 100644
--- a/src/3rdparty/v8/src/debug.h
+++ b/src/3rdparty/v8/src/debug.h
@@ -232,19 +232,16 @@ class Debug {
void PreemptionWhileInDebugger();
void Iterate(ObjectVisitor* v);
- NO_INLINE(void PutValuesOnStackAndDie(int start,
- Address c_entry_fp,
- Address last_fp,
- Address larger_fp,
- int count,
- int end));
Object* Break(Arguments args);
- void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+ void SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position);
+ bool SetBreakPointForScript(Handle<Script> script,
+ Handle<Object> break_point_object,
+ int* source_position);
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodWithOneShot(Handle<JSFunction> function);
void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
@@ -260,8 +257,14 @@ class Debug {
void PrepareForBreakPoints();
- // Returns whether the operation succeeded.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+ // This function is used in FunctionNameUsing* tests.
+ Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
+
+  // Returns whether the operation succeeded. Compilation can only be
+  // triggered if a valid closure is passed as the second argument; otherwise
+  // the shared function needs to be compiled already.
+ bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> function);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
@@ -440,7 +443,8 @@ class Debug {
// The top JS frame had been calling some C++ function. The return address
// gets patched automatically.
FRAME_DROPPED_IN_DIRECT_CALL,
- FRAME_DROPPED_IN_RETURN_CALL
+ FRAME_DROPPED_IN_RETURN_CALL,
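+    // Pseudo-mode: keep the frame drop mode that was set previously.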
+ CURRENTLY_SET_MODE
};
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
@@ -789,7 +793,6 @@ class Debugger {
};
void OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags);
- void OnNewFunction(Handle<JSFunction> fun);
void OnScriptCollected(int id);
void ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
@@ -871,6 +874,8 @@ class Debugger {
bool compiling_natives() const { return compiling_natives_; }
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
+ void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
+ bool live_edit_enabled() const { return live_edit_enabled_; }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
@@ -899,6 +904,7 @@ class Debugger {
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
bool is_loading_debugger_; // Are we loading the debugger?
+ bool live_edit_enabled_; // Enable LiveEdit.
bool never_unload_debugger_; // Can we unload the debugger?
bool force_debugger_active_; // Activate debugger without event listeners.
v8::Debug::MessageHandler2 message_handler_;
diff --git a/src/3rdparty/v8/src/deoptimizer.cc b/src/3rdparty/v8/src/deoptimizer.cc
index 2a30ddd..9d16211 100644
--- a/src/3rdparty/v8/src/deoptimizer.cc
+++ b/src/3rdparty/v8/src/deoptimizer.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "accessors.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "disasm.h"
@@ -40,8 +41,11 @@ namespace v8 {
namespace internal {
DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_ = NULL;
- lazy_deoptimization_entry_code_ = NULL;
+ eager_deoptimization_entry_code_entries_ = -1;
+ lazy_deoptimization_entry_code_entries_ = -1;
+ size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
+ eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+ lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -51,16 +55,18 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
- if (eager_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
- eager_deoptimization_entry_code_ = NULL;
- }
- if (lazy_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
- lazy_deoptimization_entry_code_ = NULL;
+ delete eager_deoptimization_entry_code_;
+ eager_deoptimization_entry_code_ = NULL;
+ delete lazy_deoptimization_entry_code_;
+ lazy_deoptimization_entry_code_ = NULL;
+
+ DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+ while (current != NULL) {
+ DeoptimizingCodeListNode* prev = current;
+ current = current->next();
+ delete prev;
}
+ deoptimizing_code_list_ = NULL;
}
@@ -95,6 +101,20 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
}
+// The deopt table epilogue code is no larger than 2 KB on any platform.
+static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
+
+
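+// Worst-case size of the deopt entry table: all kMaxNumberOfEntries entries
+// plus the epilogue code, rounded up to whole OS commit pages.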
+size_t Deoptimizer::GetMaxDeoptTableSize() {
+ int entries_size =
+ Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
+ int commit_page_size = static_cast<int>(OS::CommitPageSize());
+ int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
+ commit_page_size) + 1;
+ return static_cast<size_t>(commit_page_size * page_count);
+}
+
+
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_;
@@ -268,20 +288,29 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
+ Isolate* isolate = context->GetIsolate();
+ ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
AssertNoAllocation no_allocation;
- ASSERT(context->IsGlobalContext());
+ ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
- // Run through the list of optimized functions and deoptimize them.
+
+ // Create a snapshot of the optimized functions list. This is needed because
+ // visitors might remove more than one link from the list at once.
+ ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
- // Get the next link before deoptimizing as deoptimizing will clear the
- // next link.
+ snapshot.Add(element_function, isolate->runtime_zone());
element = element_function->next_function_link();
- visitor->VisitFunction(element_function);
}
+
+ // Run through the snapshot of optimized functions and visit them.
+ for (int i = 0; i < snapshot.length(); ++i) {
+ visitor->VisitFunction(snapshot.at(i));
+ }
+
visitor->LeaveContext(context);
}
@@ -294,10 +323,10 @@ void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(proto)->global_context(), visitor);
+ GlobalObject::cast(proto)->native_context(), visitor);
} else if (object->IsGlobalObject()) {
VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(object)->global_context(), visitor);
+ GlobalObject::cast(object)->native_context(), visitor);
}
}
@@ -306,12 +335,12 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
- // Run through the list of all global contexts and deoptimize.
- Object* context = Isolate::Current()->heap()->global_contexts_list();
+ // Run through the list of all native contexts and deoptimize.
+ Object* context = Isolate::Current()->heap()->native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the global field of the context can be undefined.
- Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
+ Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
if (!global->IsUndefined()) {
VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
visitor);
@@ -354,10 +383,13 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
+ has_alignment_padding_(0),
input_(NULL),
output_count_(0),
jsframe_count_(0),
output_(NULL),
+ deferred_arguments_objects_values_(0),
+ deferred_arguments_objects_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@@ -378,6 +410,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
+ function->shared()->increment_deopt_count();
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
@@ -440,61 +473,61 @@ void Deoptimizer::DeleteFrameDescriptions() {
}
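+// Returns the address of deopt entry 'id'. In ENSURE_ENTRY_CODE mode the
+// entry table is generated on demand if it does not yet cover 'id'.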
-Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+Address Deoptimizer::GetDeoptimizationEntry(int id,
+ BailoutType type,
+ GetEntryMode mode) {
ASSERT(id >= 0);
- if (id >= kNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
+ if (id >= kMaxNumberOfEntries) return NULL;
+ VirtualMemory* base = NULL;
+ if (mode == ENSURE_ENTRY_CODE) {
+ EnsureCodeForDeoptimizationEntry(type, id);
+ } else {
+ ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
+ }
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
- if (data->eager_deoptimization_entry_code_ == NULL) {
- data->eager_deoptimization_entry_code_ = CreateCode(type);
- }
base = data->eager_deoptimization_entry_code_;
} else {
- if (data->lazy_deoptimization_entry_code_ == NULL) {
- data->lazy_deoptimization_entry_code_ = CreateCode(type);
- }
base = data->lazy_deoptimization_entry_code_;
}
return
- static_cast<Address>(base->area_start()) + (id * table_entry_size_);
+ static_cast<Address>(base->address()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- MemoryChunk* base = NULL;
+ VirtualMemory* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
} else {
base = data->lazy_deoptimization_entry_code_;
}
+  Address base_casted = base != NULL
+      ? reinterpret_cast<Address>(base->address()) : NULL;
  if (base == NULL ||
-      addr < base->area_start() ||
-      addr >= base->area_start() +
-          (kNumberOfEntries * table_entry_size_)) {
+      addr < base_casted ||
+      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base->area_start()) % table_entry_size_);
- return static_cast<int>(addr - base->area_start()) / table_entry_size_;
+ static_cast<int>(addr - base_casted) % table_entry_size_);
+ return static_cast<int>(addr - base_casted) / table_entry_size_;
}
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
- unsigned id,
+ BailoutId id,
SharedFunctionInfo* shared) {
// TODO(kasperl): For now, we do a simple linear search for the PC
// offset associated with the given node id. This should probably be
// changed to a binary search.
int length = data->DeoptPoints();
- Smi* smi_id = Smi::FromInt(id);
for (int i = 0; i < length; i++) {
- if (data->AstId(i) == smi_id) {
+ if (data->AstId(i) == id) {
return data->PcAndState(i)->value();
}
}
- PrintF("[couldn't find pc offset for node=%u]\n", id);
+ PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
// Print the source code if available.
HeapStringAllocator string_allocator;
@@ -541,7 +574,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
- unsigned node_id = input_data->AstId(bailout_id_)->value();
+ BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
@@ -579,7 +612,24 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::CONSTRUCT_STUB_FRAME:
DoComputeConstructStubFrame(&iterator, i);
break;
- default:
+ case Translation::GETTER_STUB_FRAME:
+ DoComputeAccessorStubFrame(&iterator, i, false);
+ break;
+ case Translation::SETTER_STUB_FRAME:
+ DoComputeAccessorStubFrame(&iterator, i, true);
+ break;
+ case Translation::BEGIN:
+ case Translation::REGISTER:
+ case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
+ case Translation::DOUBLE_REGISTER:
+ case Translation::STACK_SLOT:
+ case Translation::INT32_STACK_SLOT:
+ case Translation::UINT32_STACK_SLOT:
+ case Translation::DOUBLE_STACK_SLOT:
+ case Translation::LITERAL:
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::DUPLICATE:
UNREACHABLE();
break;
}
@@ -593,19 +643,34 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
- PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
- node_id,
+ PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ " took %0.3f ms]\n",
+ node_id.ToInt(),
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
output_[index]->GetState()->value())),
+ has_alignment_padding_ ? "with padding" : "no padding",
ms);
}
}
-void Deoptimizer::MaterializeHeapNumbers() {
+void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
+
+ // Handlify all argument object values before triggering any allocation.
+ List<Handle<Object> > values(deferred_arguments_objects_values_.length());
+ for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
+ values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
+ }
+
+ // Play it safe and clear all unhandlified values before we continue.
+ deferred_arguments_objects_values_.Clear();
+
+  // Materialize all heap numbers before looking at arguments, because the
+  // output frames used later to materialize the arguments objects must
+  // already contain valid heap numbers.
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -615,9 +680,55 @@ void Deoptimizer::MaterializeHeapNumbers() {
d.value(),
d.slot_address());
}
-
Memory::Object_at(d.slot_address()) = *num;
}
+
+ // Materialize arguments objects one frame at a time.
+ for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
+ if (frame_index != 0) it->Advance();
+ JavaScriptFrame* frame = it->frame();
+ Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
+ Handle<JSObject> arguments;
+ for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
+ if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
+ ArgumentsObjectMaterializationDescriptor descriptor =
+ deferred_arguments_objects_.RemoveLast();
+ const int length = descriptor.arguments_length();
+ if (arguments.is_null()) {
+ if (frame->has_adapted_arguments()) {
+ // Use the arguments adapter frame we just built to materialize the
+ // arguments object. FunctionGetArguments can't throw an exception,
+ // so cast away the doubt with an assert.
+ arguments = Handle<JSObject>(JSObject::cast(
+ Accessors::FunctionGetArguments(*function,
+ NULL)->ToObjectUnchecked()));
+ values.RewindBy(length);
+ } else {
+ // Construct an arguments object and copy the parameters to a newly
+ // allocated arguments object backing store.
+ arguments =
+ isolate_->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array =
+ isolate_->factory()->NewFixedArray(length);
+ ASSERT(array->length() == length);
+          for (int i = length - 1; i >= 0; --i) {
+ array->set(i, *values.RemoveLast());
+ }
+ arguments->set_elements(*array);
+ }
+ }
+ frame->SetExpression(i, *arguments);
+ ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing %sarguments object for %p: ",
+ frame->has_adapted_arguments() ? "(adapted) " : "",
+ reinterpret_cast<void*>(descriptor.slot_address()));
+ arguments->ShortPrint();
+ PrintF("\n");
+ }
+ }
+ }
+ }
}
@@ -696,6 +807,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -744,6 +857,34 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
+ case Translation::UINT32_REGISTER: {
+ int input_reg = iterator->Next();
+ uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (FLAG_trace_deopt) {
+ PrintF(
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
+ " ; uint %s (%s)\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ value,
+ converter.NameOfCPURegister(input_reg),
+ is_smi ? "smi" : "heap number");
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else {
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<uint32_t>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ }
+ return;
+ }
+
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
@@ -769,7 +910,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
@@ -789,7 +930,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
+ PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
@@ -809,13 +950,43 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset =
+ input_->GetOffsetFromSlotIndex(input_slot_index);
+ uintptr_t value =
+ static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
+ bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": ",
+ output_[frame_index]->GetTop() + output_offset);
+ PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
+ output_offset,
+ value,
+ input_offset,
+ is_smi ? "smi" : "heap number");
+ }
+ if (is_smi) {
+ intptr_t tagged_value =
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+ } else {
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+ static_cast<double>(static_cast<uint32_t>(value)));
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ }
+ return;
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@@ -843,8 +1014,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
case Translation::ARGUMENTS_OBJECT: {
- // Use the arguments marker value as a sentinel and fill in the arguments
- // object after the deoptimized frame is built.
+ int args_index = iterator->Next() + 1; // Skip receiver.
+ int args_length = iterator->Next() - 1; // Skip receiver.
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
@@ -852,15 +1023,76 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
isolate_->heap()->arguments_marker()->ShortPrint();
PrintF(" ; arguments object\n");
}
+ // Use the arguments marker value as a sentinel and fill in the arguments
+ // object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
+ AddArgumentsObject(
+ output_[frame_index]->GetTop() + output_offset, args_length);
output_[frame_index]->SetFrameSlot(output_offset, value);
+ // We save the tagged argument values on the side and materialize the
+ // actual arguments object after the deoptimized frame is built.
+ for (int i = 0; i < args_length; i++) {
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ AddArgumentsObjectValue(input_value);
+ }
return;
}
}
}
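+
+
+// Reads an int32 out of a Smi or a HeapNumber, failing if the heap number's
+// value does not round-trip through int32 without loss.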
+static bool ObjectToInt32(Object* obj, int32_t* value) {
+ if (obj->IsSmi()) {
+ *value = Smi::cast(obj)->value();
+ return true;
+ }
+
+ if (obj->IsHeapNumber()) {
+ double num = HeapNumber::cast(obj)->value();
+ if (FastI2D(FastD2I(num)) != num) {
+ if (FLAG_trace_osr) {
+ PrintF("**** %g could not be converted to int32 ****\n",
+ HeapNumber::cast(obj)->value());
+ }
+ return false;
+ }
+
+ *value = FastD2I(num);
+ return true;
+ }
+
+ return false;
+}
+
+
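+// Unsigned counterpart of ObjectToInt32: additionally rejects negative
+// values.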
+static bool ObjectToUint32(Object* obj, uint32_t* value) {
+ if (obj->IsSmi()) {
+ if (Smi::cast(obj)->value() < 0) return false;
+
+ *value = static_cast<uint32_t>(Smi::cast(obj)->value());
+ return true;
+ }
+
+ if (obj->IsHeapNumber()) {
+ double num = HeapNumber::cast(obj)->value();
+ if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
+ if (FLAG_trace_osr) {
+ PrintF("**** %g could not be converted to uint32 ****\n",
+ HeapNumber::cast(obj)->value());
+ }
+ return false;
+ }
+
+ *value = FastD2UI(num);
+ return true;
+ }
+
+ return false;
+}
+
+
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
@@ -883,6 +1115,8 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -900,22 +1134,10 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
+ int32_t int32_value = 0;
+ if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_reg = iterator->Next();
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : FastD2I(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
@@ -926,6 +1148,21 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
+ case Translation::UINT32_REGISTER: {
+ uint32_t uint32_value = 0;
+ if (!ObjectToUint32(input_object, &uint32_value)) return false;
+
+ int output_reg = iterator->Next();
+ if (FLAG_trace_osr) {
+ PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
+ converter.NameOfCPURegister(output_reg),
+ uint32_value,
+ *input_offset);
+ }
+        output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
+        break;
+      }
+
case Translation::DOUBLE_REGISTER: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
@@ -959,24 +1196,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_STACK_SLOT: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
+ int32_t int32_value = 0;
+ if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
- int int32_value = input_object->IsSmi()
- ? Smi::cast(input_object)->value()
- : DoubleToInt32(input_object->Number());
- // Abort the translation if the conversion lost information.
- if (!input_object->IsSmi() &&
- FastI2D(int32_value) != input_object->Number()) {
- if (FLAG_trace_osr) {
- PrintF("**** %g could not be converted to int32 ****\n",
- input_object->Number());
- }
- return false;
- }
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
@@ -987,6 +1212,23 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
+ case Translation::UINT32_STACK_SLOT: {
+ uint32_t uint32_value = 0;
+ if (!ObjectToUint32(input_object, &uint32_value)) return false;
+
+ int output_index = iterator->Next();
+ unsigned output_offset =
+ output->GetOffsetFromSlotIndex(output_index);
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
+ output_offset,
+ uint32_value,
+ *input_offset);
+ }
+ output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
+ break;
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
static const int kLowerOffset = 0 * kPointerSize;
static const int kUpperOffset = 1 * kPointerSize;
@@ -1136,39 +1378,63 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
}
-void Deoptimizer::AddDoubleValue(intptr_t slot_address,
- double value) {
+void Deoptimizer::AddArgumentsObject(intptr_t slot_address, int argc) {
+ ArgumentsObjectMaterializationDescriptor object_desc(
+ reinterpret_cast<Address>(slot_address), argc);
+ deferred_arguments_objects_.Add(object_desc);
+}
+
+
+void Deoptimizer::AddArgumentsObjectValue(intptr_t value) {
+ deferred_arguments_objects_values_.Add(reinterpret_cast<Object*>(value));
+}
+
+
+void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
HeapNumberMaterializationDescriptor value_desc(
reinterpret_cast<Address>(slot_address), value);
deferred_heap_numbers_.Add(value_desc);
}
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
+ int max_entry_id) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
ASSERT(!Serializer::enabled());
+ ASSERT(type == EAGER || type == LAZY);
+ DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ int entry_count = (type == EAGER)
+ ? data->eager_deoptimization_entry_code_entries_
+ : data->lazy_deoptimization_entry_code_entries_;
+ if (max_entry_id < entry_count) return;
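+  // Grow the number of generated entries geometrically, clamped to the
+  // [kMinNumberOfEntries, kMaxNumberOfEntries] range.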
+ entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
+ Deoptimizer::kMaxNumberOfEntries);
+
MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+ GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- MemoryChunk* chunk =
- Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
- EXECUTABLE,
- NULL);
- ASSERT(chunk->area_size() >= desc.instr_size);
- if (chunk == NULL) {
- V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ VirtualMemory* memory = type == EAGER
+ ? data->eager_deoptimization_entry_code_
+ : data->lazy_deoptimization_entry_code_;
+ size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
+ ASSERT(static_cast<int>(table_size) >= desc.instr_size);
+ memory->Commit(memory->address(), table_size, true);
+ memcpy(memory->address(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(memory->address(), desc.instr_size);
+
+ if (type == EAGER) {
+ data->eager_deoptimization_entry_code_entries_ = entry_count;
+ } else {
+ data->lazy_deoptimization_entry_code_entries_ = entry_count;
}
- memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->area_start(), desc.instr_size);
- return chunk;
}
@@ -1210,6 +1476,54 @@ void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
}
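+// Unlinks every function whose optimized code is 'code' from the context's
+// list of optimized functions and returns them as a separate linked list.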
+static Object* CutOutRelatedFunctionsList(Context* context,
+ Code* code,
+ Object* undefined) {
+ Object* result_list_head = undefined;
+ Object* head;
+ Object* current;
+ current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ JSFunction* prev = NULL;
+ while (current != undefined) {
+ JSFunction* func = JSFunction::cast(current);
+ current = func->next_function_link();
+ if (func->code() == code) {
+ func->set_next_function_link(result_list_head);
+ result_list_head = func;
+ if (prev) {
+ prev->set_next_function_link(current);
+ } else {
+ head = current;
+ }
+ } else {
+ prev = func;
+ }
+ }
+ if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
+ context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
+ }
+ return result_list_head;
+}
+
+
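+// Makes every function that currently uses 'code' fall back to the shared
+// (unoptimized) code and unlinks it from the optimized functions list.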
+void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
+ Code* code) {
+ Context* context = function->context()->native_context();
+
+ SharedFunctionInfo* shared = function->shared();
+
+ Object* undefined = Isolate::Current()->heap()->undefined_value();
+ Object* current = CutOutRelatedFunctionsList(context, code, undefined);
+
+ while (current != undefined) {
+ JSFunction* func = JSFunction::cast(current);
+ current = func->next_function_link();
+ func->set_code(shared->code());
+ func->set_next_function_link(undefined);
+ }
+}
+
+
FrameDescription::FrameDescription(uint32_t frame_size,
JSFunction* function)
: frame_size_(frame_size),
@@ -1290,7 +1604,7 @@ Object* FrameDescription::GetExpression(int index) {
}
-void TranslationBuffer::Add(int32_t value) {
+void TranslationBuffer::Add(int32_t value, Zone* zone) {
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
uint32_t bits = ((is_negative ? -value : value) << 1) |
@@ -1299,7 +1613,7 @@ void TranslationBuffer::Add(int32_t value) {
// each byte to indicate whether or not more bytes follow.
do {
uint32_t next = bits >> 7;
- contents_.Add(((bits << 1) & 0xFF) | (next != 0));
+ contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
bits = next;
} while (bits != 0);
}
@@ -1332,95 +1646,127 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray() {
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
- buffer_->Add(CONSTRUCT_STUB_FRAME);
- buffer_->Add(literal_id);
- buffer_->Add(height);
+ buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+ buffer_->Add(height, zone());
+}
+
+
+void Translation::BeginGetterStubFrame(int literal_id) {
+ buffer_->Add(GETTER_STUB_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+}
+
+
+void Translation::BeginSetterStubFrame(int literal_id) {
+ buffer_->Add(SETTER_STUB_FRAME, zone());
+ buffer_->Add(literal_id, zone());
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
- buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
- buffer_->Add(literal_id);
- buffer_->Add(height);
+ buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
+ buffer_->Add(literal_id, zone());
+ buffer_->Add(height, zone());
}
-void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
- buffer_->Add(JS_FRAME);
- buffer_->Add(node_id);
- buffer_->Add(literal_id);
- buffer_->Add(height);
+void Translation::BeginJSFrame(BailoutId node_id,
+ int literal_id,
+ unsigned height) {
+ buffer_->Add(JS_FRAME, zone());
+ buffer_->Add(node_id.ToInt(), zone());
+ buffer_->Add(literal_id, zone());
+ buffer_->Add(height, zone());
}
void Translation::StoreRegister(Register reg) {
- buffer_->Add(REGISTER);
- buffer_->Add(reg.code());
+ buffer_->Add(REGISTER, zone());
+ buffer_->Add(reg.code(), zone());
}
void Translation::StoreInt32Register(Register reg) {
- buffer_->Add(INT32_REGISTER);
- buffer_->Add(reg.code());
+ buffer_->Add(INT32_REGISTER, zone());
+ buffer_->Add(reg.code(), zone());
+}
+
+
+void Translation::StoreUint32Register(Register reg) {
+ buffer_->Add(UINT32_REGISTER, zone());
+ buffer_->Add(reg.code(), zone());
}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
- buffer_->Add(DOUBLE_REGISTER);
- buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
+ buffer_->Add(DOUBLE_REGISTER, zone());
+ buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
}
void Translation::StoreStackSlot(int index) {
- buffer_->Add(STACK_SLOT);
- buffer_->Add(index);
+ buffer_->Add(STACK_SLOT, zone());
+ buffer_->Add(index, zone());
}
void Translation::StoreInt32StackSlot(int index) {
- buffer_->Add(INT32_STACK_SLOT);
- buffer_->Add(index);
+ buffer_->Add(INT32_STACK_SLOT, zone());
+ buffer_->Add(index, zone());
+}
+
+
+void Translation::StoreUint32StackSlot(int index) {
+ buffer_->Add(UINT32_STACK_SLOT, zone());
+ buffer_->Add(index, zone());
}
void Translation::StoreDoubleStackSlot(int index) {
- buffer_->Add(DOUBLE_STACK_SLOT);
- buffer_->Add(index);
+ buffer_->Add(DOUBLE_STACK_SLOT, zone());
+ buffer_->Add(index, zone());
}
void Translation::StoreLiteral(int literal_id) {
- buffer_->Add(LITERAL);
- buffer_->Add(literal_id);
+ buffer_->Add(LITERAL, zone());
+ buffer_->Add(literal_id, zone());
}
-void Translation::StoreArgumentsObject() {
- buffer_->Add(ARGUMENTS_OBJECT);
+void Translation::StoreArgumentsObject(int args_index, int args_length) {
+ buffer_->Add(ARGUMENTS_OBJECT, zone());
+ buffer_->Add(args_index, zone());
+ buffer_->Add(args_length, zone());
}
void Translation::MarkDuplicate() {
- buffer_->Add(DUPLICATE);
+ buffer_->Add(DUPLICATE, zone());
}
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
- case ARGUMENTS_OBJECT:
case DUPLICATE:
return 0;
+ case GETTER_STUB_FRAME:
+ case SETTER_STUB_FRAME:
case REGISTER:
case INT32_REGISTER:
+ case UINT32_REGISTER:
case DOUBLE_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
+ case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
case CONSTRUCT_STUB_FRAME:
+ case ARGUMENTS_OBJECT:
return 2;
case JS_FRAME:
return 3;
@@ -1442,16 +1788,24 @@ const char* Translation::StringFor(Opcode opcode) {
return "ARGUMENTS_ADAPTOR_FRAME";
case CONSTRUCT_STUB_FRAME:
return "CONSTRUCT_STUB_FRAME";
+ case GETTER_STUB_FRAME:
+ return "GETTER_STUB_FRAME";
+ case SETTER_STUB_FRAME:
+ return "SETTER_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
return "INT32_REGISTER";
+ case UINT32_REGISTER:
+ return "UINT32_REGISTER";
case DOUBLE_REGISTER:
return "DOUBLE_REGISTER";
case STACK_SLOT:
return "STACK_SLOT";
case INT32_STACK_SLOT:
return "INT32_STACK_SLOT";
+ case UINT32_STACK_SLOT:
+ return "UINT32_STACK_SLOT";
case DOUBLE_STACK_SLOT:
return "DOUBLE_STACK_SLOT";
case LITERAL:
@@ -1498,6 +1852,8 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME:
// Peeled off before getting here.
break;
@@ -1507,6 +1863,7 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::REGISTER:
case Translation::INT32_REGISTER:
+ case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::DUPLICATE:
// We are at safepoint which corresponds to call. All registers are
@@ -1526,6 +1883,12 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
return SlotRef(slot_addr, SlotRef::INT32);
}
+ case Translation::UINT32_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::UINT32);
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
@@ -1565,7 +1928,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
int inlined_jsframe_index,
int formal_parameter_count) {
AssertNoAllocation no_gc;
- int deopt_index = AstNode::kNoNumber;
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
diff --git a/src/3rdparty/v8/src/deoptimizer.h b/src/3rdparty/v8/src/deoptimizer.h
index 6bc4a51..4aa38ce 100644
--- a/src/3rdparty/v8/src/deoptimizer.h
+++ b/src/3rdparty/v8/src/deoptimizer.h
@@ -57,18 +57,32 @@ class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
};
+class ArgumentsObjectMaterializationDescriptor BASE_EMBEDDED {
+ public:
+ ArgumentsObjectMaterializationDescriptor(Address slot_address, int argc)
+ : slot_address_(slot_address), arguments_length_(argc) { }
+
+ Address slot_address() const { return slot_address_; }
+ int arguments_length() const { return arguments_length_; }
+
+ private:
+ Address slot_address_;
+ int arguments_length_;
+};
+
+
class OptimizedFunctionVisitor BASE_EMBEDDED {
public:
virtual ~OptimizedFunctionVisitor() {}
// Function which is called before iteration of any optimized functions
- // from given global context.
+ // from given native context.
virtual void EnterContext(Context* context) = 0;
virtual void VisitFunction(JSFunction* function) = 0;
// Function which is called after iteration of all optimized functions
- // from given global context.
+ // from given native context.
virtual void LeaveContext(Context* context) = 0;
};
@@ -86,8 +100,10 @@ class DeoptimizerData {
#endif
private:
- MemoryChunk* eager_deoptimization_entry_code_;
- MemoryChunk* lazy_deoptimization_entry_code_;
+ int eager_deoptimization_entry_code_entries_;
+ int lazy_deoptimization_entry_code_entries_;
+ VirtualMemory* eager_deoptimization_entry_code_;
+ VirtualMemory* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -152,6 +168,10 @@ class Deoptimizer : public Malloced {
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
+ // Iterate over all the functions which share the same code object
+ // and make them use unoptimized version.
+ static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
+
// Deoptimize all functions in the heap.
static void DeoptimizeAll();
@@ -196,7 +216,7 @@ class Deoptimizer : public Malloced {
~Deoptimizer();
- void MaterializeHeapNumbers();
+ void MaterializeHeapObjects(JavaScriptFrameIterator* it);
#ifdef ENABLE_DEBUGGER_SUPPORT
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_top,
@@ -208,10 +228,20 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- static Address GetDeoptimizationEntry(int id, BailoutType type);
+
+ enum GetEntryMode {
+ CALCULATE_ENTRY_ADDRESS,
+ ENSURE_ENTRY_CODE
+ };
+
+
+ static Address GetDeoptimizationEntry(
+ int id,
+ BailoutType type,
+ GetEntryMode mode = ENSURE_ENTRY_CODE);
static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
- unsigned node_id,
+ BailoutId node_id,
SharedFunctionInfo* shared);
// Code generation support.
@@ -221,6 +251,10 @@ class Deoptimizer : public Malloced {
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+ static int has_alignment_padding_offset() {
+ return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+ }
+
static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
@@ -261,8 +295,11 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
+ static size_t GetMaxDeoptTableSize();
+
private:
- static const int kNumberOfEntries = 16384;
+ static const int kMinNumberOfEntries = 64;
+ static const int kMaxNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@@ -280,6 +317,9 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index);
+ void DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -298,9 +338,12 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
+ void AddArgumentsObject(intptr_t slot_address, int argc);
+ void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
- static MemoryChunk* CreateCode(BailoutType type);
+ static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+ int max_entry_id);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -322,6 +365,7 @@ class Deoptimizer : public Malloced {
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
+ int has_alignment_padding_;
// Input frame description.
FrameDescription* input_;
@@ -332,6 +376,8 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
+ List<Object*> deferred_arguments_objects_values_;
+ List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
@@ -515,10 +561,10 @@ class FrameDescription {
class TranslationBuffer BASE_EMBEDDED {
public:
- TranslationBuffer() : contents_(256) { }
+ explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
int CurrentIndex() const { return contents_.length(); }
- void Add(int32_t value);
+ void Add(int32_t value, Zone* zone);
Handle<ByteArray> CreateByteArray();
@@ -554,12 +600,16 @@ class Translation BASE_EMBEDDED {
BEGIN,
JS_FRAME,
CONSTRUCT_STUB_FRAME,
+ GETTER_STUB_FRAME,
+ SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
+ UINT32_REGISTER,
DOUBLE_REGISTER,
STACK_SLOT,
INT32_STACK_SLOT,
+ UINT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
LITERAL,
ARGUMENTS_OBJECT,
@@ -569,39 +619,51 @@ class Translation BASE_EMBEDDED {
DUPLICATE
};
- Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
+ Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
+ Zone* zone)
: buffer_(buffer),
- index_(buffer->CurrentIndex()) {
- buffer_->Add(BEGIN);
- buffer_->Add(frame_count);
- buffer_->Add(jsframe_count);
+ index_(buffer->CurrentIndex()),
+ zone_(zone) {
+ buffer_->Add(BEGIN, zone);
+ buffer_->Add(frame_count, zone);
+ buffer_->Add(jsframe_count, zone);
}
int index() const { return index_; }
// Commands.
- void BeginJSFrame(int node_id, int literal_id, unsigned height);
+ void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
+ void BeginGetterStubFrame(int literal_id);
+ void BeginSetterStubFrame(int literal_id);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
+ void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
+ void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
- void StoreArgumentsObject();
+ void StoreArgumentsObject(int args_index, int args_length);
void MarkDuplicate();
+ Zone* zone() const { return zone_; }
+
static int NumberOfOperandsFor(Opcode opcode);
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
static const char* StringFor(Opcode opcode);
#endif
+ // A literal id which refers to the JSFunction itself.
+ static const int kSelfLiteralId = -239;
+
private:
TranslationBuffer* buffer_;
int index_;
+ Zone* zone_;
};
@@ -631,6 +693,7 @@ class SlotRef BASE_EMBEDDED {
UNKNOWN,
TAGGED,
INT32,
+ UINT32,
DOUBLE,
LITERAL
};
@@ -658,6 +721,16 @@ class SlotRef BASE_EMBEDDED {
}
}
+ case UINT32: {
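+        // Box as a Smi when the value fits, otherwise allocate a heap number.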
+ uint32_t value = Memory::uint32_at(addr_);
+ if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
+ } else {
+ return Isolate::Current()->factory()->NewNumber(
+ static_cast<double>(value));
+ }
+ }
+
case DOUBLE: {
double value = Memory::double_at(addr_);
return Isolate::Current()->factory()->NewNumber(value);
diff --git a/src/3rdparty/v8/src/disassembler.cc b/src/3rdparty/v8/src/disassembler.cc
index e3b40ab..9f8b9a8 100644
--- a/src/3rdparty/v8/src/disassembler.cc
+++ b/src/3rdparty/v8/src/disassembler.cc
@@ -244,8 +244,8 @@ static int DecodeIt(FILE* f,
out.AddFormatted(" %s, %s", Code::Kind2String(kind),
Code::ICState2String(ic_state));
if (ic_state == MONOMORPHIC) {
- PropertyType type = code->type();
- out.AddFormatted(", %s", Code::PropertyType2String(type));
+ Code::StubType type = code->type();
+ out.AddFormatted(", %s", Code::StubType2String(type));
}
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
diff --git a/src/3rdparty/v8/src/elements-kind.cc b/src/3rdparty/v8/src/elements-kind.cc
new file mode 100644
index 0000000..19fac44
--- /dev/null
+++ b/src/3rdparty/v8/src/elements-kind.cc
@@ -0,0 +1,139 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "elements-kind.h"
+
+#include "api.h"
+#include "elements.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* ElementsKindToString(ElementsKind kind) {
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ return accessor->name();
+}
+
+
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+ FPrintF(out, "%s", ElementsKindToString(kind));
+}
+
+
+ElementsKind GetInitialFastElementsKind() {
+ if (FLAG_packed_arrays) {
+ return FAST_SMI_ELEMENTS;
+ } else {
+ return FAST_HOLEY_SMI_ELEMENTS;
+ }
+}
+
+
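+// Lazily built table defining the order in which the fast elements kinds
+// generalize, from packed smis up to holey tagged values.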
+struct InitializeFastElementsKindSequence {
+ static void Construct(
+ ElementsKind** fast_elements_kind_sequence_ptr) {
+ ElementsKind* fast_elements_kind_sequence =
+ new ElementsKind[kFastElementsKindCount];
+ *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
+ fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
+ fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
+ fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
+ fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
+ fast_elements_kind_sequence[4] = FAST_ELEMENTS;
+ fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
+ }
+};
+
+
+static LazyInstance<ElementsKind*,
+ InitializeFastElementsKindSequence>::type
+ fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
+
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
+ ASSERT(sequence_number >= 0 &&
+ sequence_number < kFastElementsKindCount);
+ return fast_elements_kind_sequence.Get()[sequence_number];
+}
+
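+
+// Inverse mapping: returns the position of 'elements_kind' in the fast
+// elements kind sequence.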
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
+ for (int i = 0; i < kFastElementsKindCount; ++i) {
+ if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
+ return i;
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
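+// Walks forward through the fast elements kind sequence and returns the next
+// more general kind, skipping holey kinds when only packed kinds are allowed.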
+ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
+ bool allow_only_packed) {
+ ASSERT(IsFastElementsKind(elements_kind));
+ ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
+ while (true) {
+ int index =
+ GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
+ elements_kind = GetFastElementsKindFromSequenceIndex(index);
+ if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
+ return elements_kind;
+ }
+ }
+ UNREACHABLE();
+ return TERMINAL_FAST_ELEMENTS_KIND;
+}
+
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ switch (from_kind) {
+ case FAST_SMI_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS;
+ case FAST_HOLEY_SMI_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS &&
+ to_kind != FAST_HOLEY_SMI_ELEMENTS;
+ case FAST_DOUBLE_ELEMENTS:
+ return to_kind != FAST_SMI_ELEMENTS &&
+ to_kind != FAST_HOLEY_SMI_ELEMENTS &&
+ to_kind != FAST_DOUBLE_ELEMENTS;
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return to_kind == FAST_ELEMENTS ||
+ to_kind == FAST_HOLEY_ELEMENTS;
+ case FAST_ELEMENTS:
+ return to_kind == FAST_HOLEY_ELEMENTS;
+ case FAST_HOLEY_ELEMENTS:
+ return false;
+ default:
+ return false;
+ }
+}
+
+
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/elements-kind.h b/src/3rdparty/v8/src/elements-kind.h
new file mode 100644
index 0000000..cb3bb9c
--- /dev/null
+++ b/src/3rdparty/v8/src/elements-kind.h
@@ -0,0 +1,229 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ELEMENTS_KIND_H_
+#define V8_ELEMENTS_KIND_H_
+
+#include "v8checks.h"
+
+namespace v8 {
+namespace internal {
+
+enum ElementsKind {
+ // The "fast" kind for elements that only contain SMI values. Must be first
+ // to make it possible to efficiently check maps for this kind.
+ FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS,
+
+ // The "fast" kind for tagged values. Must be second to make it possible to
+ // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+ // together at once.
+ FAST_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+
+ // The "fast" kind for unwrapped, non-tagged double values.
+ FAST_DOUBLE_ELEMENTS,
+ FAST_HOLEY_DOUBLE_ELEMENTS,
+
+ // The "slow" kind.
+ DICTIONARY_ELEMENTS,
+ NON_STRICT_ARGUMENTS_ELEMENTS,
+ // The "fast" kind for external arrays
+ EXTERNAL_BYTE_ELEMENTS,
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+ EXTERNAL_SHORT_ELEMENTS,
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+ EXTERNAL_INT_ELEMENTS,
+ EXTERNAL_UNSIGNED_INT_ELEMENTS,
+ EXTERNAL_FLOAT_ELEMENTS,
+ EXTERNAL_DOUBLE_ELEMENTS,
+ EXTERNAL_PIXEL_ELEMENTS,
+
+  // Derived constants from ElementsKind.
+ FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
+ LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
+ LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
+};
+
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
+ FIRST_FAST_ELEMENTS_KIND + 1;
+
+const char* ElementsKindToString(ElementsKind kind);
+void PrintElementsKind(FILE* out, ElementsKind kind);
+
+ElementsKind GetInitialFastElementsKind();
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
+
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+
+
+inline bool IsDictionaryElementsKind(ElementsKind kind) {
+ return kind == DICTIONARY_ELEMENTS;
+}
+
+
+inline bool IsExternalArrayElementsKind(ElementsKind kind) {
+ return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+}
+
+
+inline bool IsFastElementsKind(ElementsKind kind) {
+ ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
+ return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
+}
+
+
+inline bool IsFastDoubleElementsKind(ElementsKind kind) {
+ return kind == FAST_DOUBLE_ELEMENTS ||
+ kind == FAST_HOLEY_DOUBLE_ELEMENTS;
+}
+
+
+inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
+ return IsFastDoubleElementsKind(kind) ||
+ kind == EXTERNAL_DOUBLE_ELEMENTS ||
+ kind == EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
+inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
+ return kind == FAST_SMI_ELEMENTS ||
+ kind == FAST_HOLEY_SMI_ELEMENTS ||
+ kind == FAST_ELEMENTS ||
+ kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsFastSmiElementsKind(ElementsKind kind) {
+ return kind == FAST_SMI_ELEMENTS ||
+ kind == FAST_HOLEY_SMI_ELEMENTS;
+}
+
+
+inline bool IsFastObjectElementsKind(ElementsKind kind) {
+ return kind == FAST_ELEMENTS ||
+ kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsFastHoleyElementsKind(ElementsKind kind) {
+ return kind == FAST_HOLEY_SMI_ELEMENTS ||
+ kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
+ kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsHoleyElementsKind(ElementsKind kind) {
+ return IsFastHoleyElementsKind(kind) ||
+ kind == DICTIONARY_ELEMENTS;
+}
+
+
+inline bool IsFastPackedElementsKind(ElementsKind kind) {
+ return kind == FAST_SMI_ELEMENTS ||
+ kind == FAST_DOUBLE_ELEMENTS ||
+ kind == FAST_ELEMENTS;
+}
+
+
+inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
+ if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ return FAST_SMI_ELEMENTS;
+ }
+ if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ return FAST_DOUBLE_ELEMENTS;
+ }
+ if (holey_kind == FAST_HOLEY_ELEMENTS) {
+ return FAST_ELEMENTS;
+ }
+ return holey_kind;
+}
+
+
+inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
+ if (packed_kind == FAST_SMI_ELEMENTS) {
+ return FAST_HOLEY_SMI_ELEMENTS;
+ }
+ if (packed_kind == FAST_DOUBLE_ELEMENTS) {
+ return FAST_HOLEY_DOUBLE_ELEMENTS;
+ }
+ if (packed_kind == FAST_ELEMENTS) {
+ return FAST_HOLEY_ELEMENTS;
+ }
+ return packed_kind;
+}
+
+
+inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
+ ASSERT(IsFastSmiElementsKind(from_kind));
+ return (from_kind == FAST_SMI_ELEMENTS)
+ ? FAST_ELEMENTS
+ : FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ return (GetHoleyElementsKind(from_kind) == to_kind) ||
+ (IsFastSmiElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind));
+}
+
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
+
+
+inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
+ return IsFastElementsKind(from_kind) &&
+ from_kind != TERMINAL_FAST_ELEMENTS_KIND;
+}
+
+
+ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
+ bool allow_only_packed);
+
+
+inline bool CanTransitionToMoreGeneralFastElementsKind(
+ ElementsKind elements_kind,
+ bool allow_only_packed) {
+ return IsFastElementsKind(elements_kind) &&
+ (elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
+ (!allow_only_packed || elements_kind != FAST_ELEMENTS));
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ELEMENTS_KIND_H_
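
Editor's note: GetPackedElementsKind and GetHoleyElementsKind pair each packed fast kind with its holey twin and act as the identity on every other kind. A compilable sketch of the pair (a simplified re-creation for illustration, not the shipped header):

```cpp
// Sketch of the packed<->holey helper pair from elements-kind.h.
#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS
};

static ElementsKind GetHoley(ElementsKind packed) {
  if (packed == FAST_SMI_ELEMENTS) return FAST_HOLEY_SMI_ELEMENTS;
  if (packed == FAST_DOUBLE_ELEMENTS) return FAST_HOLEY_DOUBLE_ELEMENTS;
  if (packed == FAST_ELEMENTS) return FAST_HOLEY_ELEMENTS;
  return packed;  // Already holey, or not a fast kind at all.
}

static ElementsKind GetPacked(ElementsKind holey) {
  if (holey == FAST_HOLEY_SMI_ELEMENTS) return FAST_SMI_ELEMENTS;
  if (holey == FAST_HOLEY_DOUBLE_ELEMENTS) return FAST_DOUBLE_ELEMENTS;
  if (holey == FAST_HOLEY_ELEMENTS) return FAST_ELEMENTS;
  return holey;
}

int main() {
  // The helpers are mutual inverses on the fast kinds...
  assert(GetPacked(GetHoley(FAST_DOUBLE_ELEMENTS)) == FAST_DOUBLE_ELEMENTS);
  // ...and the identity everywhere else.
  assert(GetHoley(DICTIONARY_ELEMENTS) == DICTIONARY_ELEMENTS);
  return 0;
}
```

This inverse relationship is what lets IsSimpleMapChangeTransition treat a packed kind moving to its own holey twin as a trivial map change.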
diff --git a/src/3rdparty/v8/src/elements.cc b/src/3rdparty/v8/src/elements.cc
index 9b8548d..8cb48c6 100644
--- a/src/3rdparty/v8/src/elements.cc
+++ b/src/3rdparty/v8/src/elements.cc
@@ -39,8 +39,14 @@
// Inheritance hierarchy:
// - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract)
-// - FastObjectElementsAccessor
+// - FastSmiOrObjectElementsAccessor
+// - FastPackedSmiElementsAccessor
+// - FastHoleySmiElementsAccessor
+// - FastPackedObjectElementsAccessor
+// - FastHoleyObjectElementsAccessor
// - FastDoubleElementsAccessor
+// - FastPackedDoubleElementsAccessor
+// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor
@@ -59,15 +65,24 @@ namespace v8 {
namespace internal {
+static const int kPackedSizeNotKnown = -1;
+
+
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
// fast element handler for smi-only arrays. The implementation is currently
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
- V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
- V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
- V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
+ V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
+ V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
+ FixedArray) \
+ V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
+ V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
+ V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
+ FixedDoubleArray) \
+ V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
+ FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@@ -139,8 +154,6 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
- ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
- ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -148,7 +161,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
- // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -160,12 +173,15 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
+ ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
+ ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
- if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
@@ -190,7 +206,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
- // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // Fast object arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -200,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif
}
ASSERT(to != from);
- ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+ ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
@@ -216,7 +232,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start);
}
}
- if (to_kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(to_kind)) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
@@ -234,7 +250,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+ ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -242,7 +258,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
- // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -255,14 +271,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
- if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
- ASSERT(to_kind == FAST_ELEMENTS);
- // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
+ ASSERT(IsFastObjectElementsKind(to_kind));
+ // Because Double -> Object elements transitions allocate HeapObjects
+ // iteratively, the allocation must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
@@ -313,6 +329,76 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
}
+static void CopySmiToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ Object* the_hole = from->GetHeap()->the_hole_value();
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object* hole_or_smi = from->get(from_start);
+ if (hole_or_smi == the_hole) {
+ to->set_the_hole(to_start);
+ } else {
+ to->set(to_start, Smi::cast(hole_or_smi)->value());
+ }
+ }
+}
+
+
+static void CopyPackedSmiToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int packed_size,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ uint32_t to_end;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ to_end = to->length();
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ } else {
+ to_end = to_start + static_cast<uint32_t>(copy_size);
+ }
+ ASSERT(static_cast<int>(to_end) <= to->length());
+ ASSERT(packed_size >= 0 && packed_size <= copy_size);
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
+ from_start < from_end; from_start++, to_start++) {
+ Object* smi = from->get(from_start);
+ ASSERT(!smi->IsTheHole());
+ to->set(to_start, Smi::cast(smi)->value());
+ }
+
+ while (to_start < to_end) {
+ to->set_the_hole(to_start++);
+ }
+}
+
+
static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
@@ -332,12 +418,14 @@ static void CopyObjectToDoubleElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- for (int i = 0; i < copy_size; i++) {
- Object* hole_or_object = from->get(i + from_start);
- if (hole_or_object->IsTheHole()) {
- to->set_the_hole(i + to_start);
+ Object* the_hole = from->GetHeap()->the_hole_value();
+ for (uint32_t from_end = from_start + copy_size;
+ from_start < from_end; from_start++, to_start++) {
+ Object* hole_or_object = from->get(from_start);
+ if (hole_or_object == the_hole) {
+ to->set_the_hole(to_start);
} else {
- to->set(i + to_start, hole_or_object->Number());
+ to->set(to_start, hole_or_object->Number());
}
}
}
@@ -404,13 +492,44 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
+ static void ValidateContents(JSObject* holder, int length) {
+ }
+
+ static void ValidateImpl(JSObject* holder) {
+ FixedArrayBase* fixed_array_base = holder->elements();
+ // When objects are first allocated, their elements are Failures.
+ if (fixed_array_base->IsFailure()) return;
+ if (!fixed_array_base->IsHeapObject()) return;
+ Map* map = fixed_array_base->map();
+ // Arrays that have been shifted in place can't be verified.
+ Heap* heap = holder->GetHeap();
+ if (map == heap->one_pointer_filler_map() ||
+ map == heap->two_pointer_filler_map() ||
+ map == heap->free_space_map()) {
+ return;
+ }
+ int length = 0;
+ if (holder->IsJSArray()) {
+ Object* length_obj = JSArray::cast(holder)->length();
+ if (length_obj->IsSmi()) {
+ length = Smi::cast(length_obj)->value();
+ }
+ } else {
+ length = fixed_array_base->length();
+ }
+ ElementsAccessorSubclass::ValidateContents(holder, length);
+ }
+
+ virtual void Validate(JSObject* holder) {
+ ElementsAccessorSubclass::ValidateImpl(holder);
+ }
+
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
BackingStore* backing_store) {
- MaybeObject* element =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
- return !element->IsTheHole();
+ return ElementsAccessorSubclass::GetAttributesImpl(
+ receiver, holder, key, backing_store) != ABSENT;
}
virtual bool HasElement(Object* receiver,
@@ -444,6 +563,29 @@ class ElementsAccessorBase : public ElementsAccessor {
: backing_store->GetHeap()->the_hole_value();
}
+ MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetAttributesImpl(
+ receiver, holder, key, BackingStore::cast(backing_store));
+ }
+
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return ABSENT;
+ }
+ return backing_store->is_the_hole(key) ? ABSENT : NONE;
+ }
+
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
@@ -455,9 +597,10 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* length,
BackingStore* backing_store);
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) {
+ MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
+ JSArray* array,
+ int capacity,
+ int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
array,
capacity,
@@ -481,6 +624,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
UNREACHABLE();
return NULL;
@@ -493,14 +637,27 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
+ int packed_size = kPackedSizeNotKnown;
if (from == NULL) {
from = from_holder->elements();
}
+
+ if (from_holder) {
+ ElementsKind elements_kind = from_holder->GetElementsKind();
+ bool is_packed = IsFastPackedElementsKind(elements_kind) &&
+ from_holder->IsJSArray();
+ if (is_packed) {
+ packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
+ if (copy_size >= 0 && packed_size > copy_size) {
+ packed_size = copy_size;
+ }
+ }
+ }
if (from->length() == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, to_kind, to_start, copy_size);
+ from, from_start, to, to_kind, to_start, packed_size, copy_size);
}
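
Editor's note: CopyElements above computes packed_size only when the source is a packed JSArray, taking the array length and clamping it to copy_size so the packed fast path never reads beyond the requested range; otherwise it passes kPackedSizeNotKnown. A sketch of just that computation, with plain ints standing in for the JSArray and Smi machinery (the function name here is the editor's):

```cpp
// Sketch of the packed_size derivation in CopyElements.
#include <cassert>

static const int kPackedSizeNotKnown = -1;

static int ComputePackedSize(bool is_packed_js_array,
                             int array_length,
                             int copy_size) {
  int packed_size = kPackedSizeNotKnown;
  if (is_packed_js_array) {
    packed_size = array_length;
    // Never report more packed elements than the caller asked to copy.
    if (copy_size >= 0 && packed_size > copy_size) packed_size = copy_size;
  }
  return packed_size;
}

int main() {
  assert(ComputePackedSize(true, 10, 4) == 4);   // clamped to copy_size
  assert(ComputePackedSize(true, 3, 8) == 3);    // whole packed prefix
  assert(ComputePackedSize(false, 10, 4) == kPackedSizeNotKnown);
  return 0;
}
```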
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -623,6 +780,7 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
+ friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
@@ -633,10 +791,21 @@ class FastElementsAccessor
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
+ Object* old_length = array->length();
+ bool same_or_smaller_size = old_length->IsSmi() &&
+ static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
+ ElementsKind kind = array->GetElementsKind();
+
+ if (!same_or_smaller_size && IsFastElementsKind(kind) &&
+ !IsFastHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ }
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
- if (array->HasFastTypeElements()) {
+ if (array->HasFastSmiOrObjectElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj;
}
@@ -653,7 +822,7 @@ class FastElementsAccessor
}
} else {
// Otherwise, fill the unused tail with holes.
- int old_length = FastD2I(array->length()->Number());
+ int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
backing_store->set_the_hole(i);
}
@@ -668,59 +837,66 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result;
+ array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
return array->GetHeap()->undefined_value();
}
-};
-
-
-class FastObjectElementsAccessor
- : public FastElementsAccessor<FastObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS>,
- kPointerSize> {
- public:
- explicit FastObjectElementsAccessor(const char* name)
- : FastElementsAccessor<FastObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS>,
- kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key) {
- ASSERT(obj->HasFastElements() ||
- obj->HasFastSmiOnlyElements() ||
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ ASSERT(obj->HasFastSmiOrObjectElements() ||
+ obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
- FixedArray* backing_store = FixedArray::cast(obj->elements());
- if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
- backing_store = FixedArray::cast(backing_store->get(1));
- } else {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = FixedArray::cast(writable);
+ Object* elements = obj->elements();
+ if (elements == heap->empty_fixed_array()) {
+ return heap->true_value();
+ }
+ typename KindTraits::BackingStore* backing_store =
+ KindTraits::BackingStore::cast(elements);
+ bool is_non_strict_arguments_elements_map =
+ backing_store->map() == heap->non_strict_arguments_elements_map();
+ if (is_non_strict_arguments_elements_map) {
+ backing_store =
+ KindTraits::BackingStore::cast(
+ FixedArray::cast(backing_store)->get(1));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
+ if (!is_non_strict_arguments_elements_map) {
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastPackedElementsKind(kind)) {
+ MaybeObject* transitioned =
+ obj->TransitionElementsKind(GetHoleyElementsKind(kind));
+ if (transitioned->IsFailure()) return transitioned;
+ }
+ if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ Object* writable;
+ MaybeObject* maybe = obj->EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = KindTraits::BackingStore::cast(writable);
+ }
+ }
backing_store->set_the_hole(key);
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
// To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted.
- Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
- ((key > 0 && backing_store->get(key - 1) == hole) ||
- (key + 1 < length && backing_store->get(key + 1) == hole))) {
+ ((key > 0 && backing_store->is_the_hole(key - 1)) ||
+ (key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
- if (backing_store->get(i) != hole) ++num_used;
+ if (!backing_store->is_the_hole(i)) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
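
Editor's note: the heuristic above avoids a full scan on every delete. It only counts used slots when the store is reasonably large and the deleted slot already borders a hole, and the count bails out as soon as more than a quarter of the slots are in use. A standalone sketch of the decision (the old-space check is omitted for brevity, and a vector of booleans is an assumed stand-in for the backing store):

```cpp
// Sketch of the post-delete sparseness check that decides whether to
// normalize a fast backing store into dictionary mode.
#include <cassert>
#include <vector>

static bool ShouldNormalize(const std::vector<bool>& is_hole, int key) {
  const int kMinLengthForSparsenessCheck = 64;
  int length = static_cast<int>(is_hole.size());
  if (length < kMinLengthForSparsenessCheck) return false;
  // Cheap gate: only scan when a neighbor of the deleted slot is a hole too.
  bool adjacent_hole = (key > 0 && is_hole[key - 1]) ||
                       (key + 1 < length && is_hole[key + 1]);
  if (!adjacent_hole) return false;
  int num_used = 0;
  for (int i = 0; i < length; ++i) {
    if (!is_hole[i]) ++num_used;
    if (4 * num_used > length) return false;  // > 1/4 used: stay fast.
  }
  return true;
}

int main() {
  std::vector<bool> mostly_holes(128, true);
  mostly_holes[0] = false;        // a single live element
  assert(ShouldNormalize(mostly_holes, 64));
  std::vector<bool> dense(128, false);
  dense[64] = true;               // the freshly deleted slot, no neighbors
  assert(!ShouldNormalize(dense, 64));
  return 0;
}
```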
@@ -733,27 +909,90 @@ class FastObjectElementsAccessor
return heap->true_value();
}
+ virtual MaybeObject* Delete(JSObject* obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ return DeleteCommon(obj, key, mode);
+ }
+
+ static bool HasElementImpl(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ typename KindTraits::BackingStore* backing_store) {
+ if (key >= static_cast<uint32_t>(backing_store->length())) {
+ return false;
+ }
+ return !backing_store->is_the_hole(key);
+ }
+
+ static void ValidateContents(JSObject* holder, int length) {
+#if DEBUG
+ FixedArrayBase* elements = holder->elements();
+ Heap* heap = elements->GetHeap();
+ Map* map = elements->map();
+ ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
+ (map == heap->fixed_array_map() ||
+ map == heap->fixed_cow_array_map())) ||
+ (IsFastDoubleElementsKind(KindTraits::Kind) ==
+ ((map == heap->fixed_array_map() && length == 0) ||
+ map == heap->fixed_double_array_map())));
+ for (int i = 0; i < length; i++) {
+ typename KindTraits::BackingStore* backing_store =
+ KindTraits::BackingStore::cast(elements);
+ ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
+ static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
+ (IsFastHoleyElementsKind(KindTraits::Kind) ==
+ backing_store->is_the_hole(i)));
+ }
+#endif
+ }
+};
+
+
+template<typename FastElementsAccessorSubclass,
+ typename KindTraits>
+class FastSmiOrObjectElementsAccessor
+ : public FastElementsAccessor<FastElementsAccessorSubclass,
+ KindTraits,
+ kPointerSize> {
+ public:
+ explicit FastSmiOrObjectElementsAccessor(const char* name)
+ : FastElementsAccessor<FastElementsAccessorSubclass,
+ KindTraits,
+ kPointerSize>(name) {}
+
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
- switch (to_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
- CopyObjectToObjectElements(
- FixedArray::cast(from), ElementsTraits::Kind, from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- return from;
- }
- case FAST_DOUBLE_ELEMENTS:
+ if (IsFastSmiOrObjectElementsKind(to_kind)) {
+ CopyObjectToObjectElements(
+ FixedArray::cast(from), KindTraits::Kind, from_start,
+ FixedArray::cast(to), to_kind, to_start, copy_size);
+ } else if (IsFastDoubleElementsKind(to_kind)) {
+ if (IsFastSmiElementsKind(KindTraits::Kind)) {
+ if (IsFastPackedElementsKind(KindTraits::Kind) &&
+ packed_size != kPackedSizeNotKnown) {
+ CopyPackedSmiToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start,
+ packed_size, copy_size);
+ } else {
+ CopySmiToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ }
+ } else {
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
- return from;
- default:
- UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
@@ -762,64 +1001,102 @@ class FastObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
- JSObject::SetFastElementsCapacityMode set_capacity_mode =
- obj->HasFastSmiOnlyElements()
- ? JSObject::kAllowSmiOnlyElements
- : JSObject::kDontAllowSmiOnlyElements;
+ JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
+ obj->HasFastSmiElements()
+ ? JSObject::kAllowSmiElements
+ : JSObject::kDontAllowSmiElements;
return obj->SetFastElementsCapacityAndLength(capacity,
length,
set_capacity_mode);
}
+};
- protected:
- friend class FastElementsAccessor<FastObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS>,
- kPointerSize>;
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- return DeleteCommon(obj, key);
- }
+class FastPackedSmiElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastPackedSmiElementsAccessor,
+ ElementsKindTraits<FAST_SMI_ELEMENTS> > {
+ public:
+ explicit FastPackedSmiElementsAccessor(const char* name)
+ : FastSmiOrObjectElementsAccessor<
+ FastPackedSmiElementsAccessor,
+ ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
+};
+
+
+class FastHoleySmiElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastHoleySmiElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
+ public:
+ explicit FastHoleySmiElementsAccessor(const char* name)
+ : FastSmiOrObjectElementsAccessor<
+ FastHoleySmiElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
+};
+
+
+class FastPackedObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastPackedObjectElementsAccessor,
+ ElementsKindTraits<FAST_ELEMENTS> > {
+ public:
+ explicit FastPackedObjectElementsAccessor(const char* name)
+ : FastSmiOrObjectElementsAccessor<
+ FastPackedObjectElementsAccessor,
+ ElementsKindTraits<FAST_ELEMENTS> >(name) {}
+};
+
+
+class FastHoleyObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<
+ FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
+ public:
+ explicit FastHoleyObjectElementsAccessor(const char* name)
+ : FastSmiOrObjectElementsAccessor<
+ FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
};
+template<typename FastElementsAccessorSubclass,
+ typename KindTraits>
class FastDoubleElementsAccessor
- : public FastElementsAccessor<FastDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+ : public FastElementsAccessor<FastElementsAccessorSubclass,
+ KindTraits,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
- : FastElementsAccessor<FastDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
+ : FastElementsAccessor<FastElementsAccessorSubclass,
+ KindTraits,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity,
+ length);
}
protected:
- friend class ElementsAccessorBase<FastDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
- friend class FastElementsAccessor<FastDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
- kDoubleSize>;
-
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
switch (to_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
@@ -829,26 +1106,35 @@ class FastDoubleElementsAccessor
}
return to->GetHeap()->undefined_value();
}
+};
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- int length = obj->IsJSArray()
- ? Smi::cast(JSArray::cast(obj)->length())->value()
- : FixedDoubleArray::cast(obj->elements())->length();
- if (key < static_cast<uint32_t>(length)) {
- FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
- }
- return obj->GetHeap()->true_value();
- }
- static bool HasElementImpl(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedDoubleArray* backing_store) {
- return key < static_cast<uint32_t>(backing_store->length()) &&
- !backing_store->is_the_hole(key);
- }
+class FastPackedDoubleElementsAccessor
+ : public FastDoubleElementsAccessor<
+ FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
+ public:
+ friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
+ explicit FastPackedDoubleElementsAccessor(const char* name)
+ : FastDoubleElementsAccessor<
+ FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
+};
+
+
+class FastHoleyDoubleElementsAccessor
+ : public FastDoubleElementsAccessor<
+ FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
+ public:
+ friend class ElementsAccessorBase<
+ FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
+ explicit FastHoleyDoubleElementsAccessor(const char* name)
+ : FastDoubleElementsAccessor<
+ FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
};
@@ -879,6 +1165,16 @@ class ExternalElementsAccessor
: backing_store->GetHeap()->undefined_value();
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ BackingStore* backing_store) {
+ return
+ key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ ? NONE : ABSENT;
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
@@ -1011,7 +1307,30 @@ class DictionaryElementsAccessor
JSArray* array,
Object* length_object,
uint32_t length) {
- if (length == 0) {
+ Heap* heap = array->GetHeap();
+ int capacity = dict->Capacity();
+ uint32_t new_length = length;
+ uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
+ if (new_length < old_length) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.IsDontDelete()) new_length = number + 1;
+ }
+ }
+ }
+ if (new_length != length) {
+ MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
+ if (!maybe_object->To(&length_object)) return maybe_object;
+ }
+ }
+
+ if (new_length == 0) {
// If the length of a slow array is reset to zero, we clear
// the array and flush backing storage. This has the added
// benefit that the array returns to fast mode.
@@ -1019,45 +1338,22 @@ class DictionaryElementsAccessor
MaybeObject* maybe_obj = array->ResetElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
} else {
- uint32_t new_length = length;
- uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
- if (new_length < old_length) {
- // Find last non-deletable element in range of elements to be
- // deleted and adjust range accordingly.
- Heap* heap = array->GetHeap();
- int capacity = dict->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- PropertyDetails details = dict->DetailsAt(i);
- if (details.IsDontDelete()) new_length = number + 1;
- }
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ Object* the_hole_value = heap->the_hole_value();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ dict->SetEntry(i, the_hole_value, the_hole_value);
+ removed_entries++;
}
}
- if (new_length != length) {
- MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
- if (!maybe_object->To(&length_object)) return maybe_object;
- }
-
- // Remove elements that should be deleted.
- int removed_entries = 0;
- Object* the_hole_value = heap->the_hole_value();
- for (int i = 0; i < capacity; i++) {
- Object* key = dict->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (new_length <= number && number < old_length) {
- dict->SetEntry(i, the_hole_value, the_hole_value);
- removed_entries++;
- }
- }
- }
-
- // Update the number of elements.
- dict->ElementsRemoved(removed_entries);
}
+
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
}
return length_object;
}
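
Editor's note: the hoisted scan above enforces the rule that a shrink can never drop a DONT_DELETE element: any such element inside the truncated range pins the new length to one past its index. A sketch of the clamping, with (index, dont_delete) pairs standing in for the SeededNumberDictionary (names are the editor's):

```cpp
// Sketch of the length-clamping rule for dictionary-backed arrays.
#include <cassert>
#include <cstdint>
#include <map>

static uint32_t ClampNewLength(const std::map<uint32_t, bool>& entries,
                               uint32_t old_length, uint32_t requested) {
  uint32_t new_length = requested;
  for (const auto& entry : entries) {
    uint32_t index = entry.first;
    bool dont_delete = entry.second;
    if (new_length <= index && index < old_length && dont_delete) {
      new_length = index + 1;  // Cannot delete this element; keep it.
    }
  }
  return new_length;
}

int main() {
  std::map<uint32_t, bool> entries = {{2, false}, {7, true}, {9, false}};
  // Shrinking to 0 is clamped to 8 by the non-deletable element at index 7.
  assert(ClampNewLength(entries, 10, 0) == 8);
  // Shrinking to 9 only drops index 9, which is deletable.
  assert(ClampNewLength(entries, 10, 9) == 9);
  return 0;
}
```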
@@ -1112,15 +1408,19 @@ class DictionaryElementsAccessor
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
switch (to_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
@@ -1163,6 +1463,18 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ SeededNumberDictionary* backing_store) {
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return backing_store->DetailsAt(entry).attributes();
+ }
+ return ABSENT;
+ }
+
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@@ -1222,6 +1534,22 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
+ MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArray* parameter_map) {
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return NONE;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetAttributes(
+ receiver, obj, key, arguments);
+ }
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
@@ -1247,7 +1575,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
- return FastObjectElementsAccessor::DeleteCommon(obj, key);
+ // It's difficult to access the version of DeleteCommon declared in the
+ // templatized superclass, so call the concrete implementation in the
+ // class for the most generalized ElementsKind subclass.
+ return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
return obj->GetHeap()->true_value();
@@ -1258,6 +1589,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
+ int packed_size,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -1311,7 +1643,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
if (array->IsDictionary()) {
return elements_accessors_[DICTIONARY_ELEMENTS];
} else {
- return elements_accessors_[FAST_ELEMENTS];
+ return elements_accessors_[FAST_HOLEY_ELEMENTS];
}
case EXTERNAL_BYTE_ARRAY_TYPE:
return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
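
Editor's note: ForArray falls back to FAST_HOLEY_ELEMENTS here because a raw FixedArray carries no packedness information, and the holey accessor is the most general one that is still correct. More broadly, the ELEMENTS_LIST reshuffle works because accessors are looked up by indexing an array with the ElementsKind value itself, which is why the list order must track the enum. A toy sketch of that dispatch scheme (an editor's re-creation, not the patch's classes):

```cpp
// Sketch of kind-indexed dispatch: one handler per enum value, in enum order.
#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS,
  kElementsKindCount
};

struct Accessor {
  ElementsKind kind;
};

// The table's order must match the enum, exactly as ELEMENTS_LIST must.
static const Accessor kAccessors[kElementsKindCount] = {
  {FAST_SMI_ELEMENTS}, {FAST_HOLEY_SMI_ELEMENTS},
  {FAST_ELEMENTS}, {FAST_HOLEY_ELEMENTS},
  {FAST_DOUBLE_ELEMENTS}, {FAST_HOLEY_DOUBLE_ELEMENTS},
  {DICTIONARY_ELEMENTS},
};

static const Accessor* ForKind(ElementsKind kind) {
  return &kAccessors[kind];  // O(1) dispatch; no switch needed.
}

int main() {
  for (int k = 0; k < kElementsKindCount; ++k) {
    assert(ForKind(static_cast<ElementsKind>(k))->kind == k);
  }
  return 0;
}
```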
diff --git a/src/3rdparty/v8/src/elements.h b/src/3rdparty/v8/src/elements.h
index 55d6fa5..8a83f0f 100644
--- a/src/3rdparty/v8/src/elements.h
+++ b/src/3rdparty/v8/src/elements.h
@@ -28,6 +28,7 @@
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
+#include "elements-kind.h"
#include "objects.h"
#include "heap.h"
#include "isolate.h"
@@ -45,6 +46,10 @@ class ElementsAccessor {
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
+ // Checks the elements of an object for consistency, asserting when a problem
+ // is found.
+ virtual void Validate(JSObject* obj) = 0;
+
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
@@ -66,6 +71,17 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
+ // Returns an element's attributes, or ABSENT if there is no such
+ // element. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+ // backing_store is NULL, holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
  // changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e. array that
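
Editor's note: the new GetAttributes contract distinguishes "no element" from "element with no special attributes": keys beyond capacity and holes report ABSENT, while present elements on the fast paths report NONE. A standalone sketch of the base-class rule (the enum values here are illustrative, not V8's actual bit assignments):

```cpp
// Sketch of the base-class GetAttributesImpl contract for fast kinds.
#include <cassert>
#include <vector>

enum PropertyAttributes { NONE = 0, ABSENT = 16 };  // illustrative values

static PropertyAttributes GetAttributes(const std::vector<bool>& is_hole,
                                        unsigned key) {
  if (key >= is_hole.size()) return ABSENT;  // beyond capacity
  return is_hole[key] ? ABSENT : NONE;       // holes are not elements
}

int main() {
  std::vector<bool> store = {false, true, false};  // element, hole, element
  assert(GetAttributes(store, 0) == NONE);
  assert(GetAttributes(store, 1) == ABSENT);
  assert(GetAttributes(store, 5) == ABSENT);
  return 0;
}
```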
diff --git a/src/3rdparty/v8/src/execution.cc b/src/3rdparty/v8/src/execution.cc
index 7e69abe..913bf64 100644
--- a/src/3rdparty/v8/src/execution.cc
+++ b/src/3rdparty/v8/src/execution.cc
@@ -101,12 +101,12 @@ static Handle<Object> Invoke(bool is_construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
- ASSERT(function->context()->global()->IsGlobalObject());
+ ASSERT(function->context()->global_object()->IsGlobalObject());
Handle<JSObject> oldqml;
if (!qml.is_null()) {
- oldqml = Handle<JSObject>(function->context()->qml_global());
- function->context()->set_qml_global(JSObject::cast(*qml));
+ oldqml = Handle<JSObject>(function->context()->qml_global_object());
+ function->context()->set_qml_global_object(JSObject::cast(*qml));
}
{
@@ -126,9 +126,9 @@ static Handle<Object> Invoke(bool is_construct,
}
if (!qml.is_null())
- function->context()->set_qml_global(*oldqml);
+ function->context()->set_qml_global_object(*oldqml);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
value->Verify();
#endif
@@ -142,6 +142,12 @@ static Handle<Object> Invoke(bool is_construct,
V8::FatalProcessOutOfMemory("JS", true);
}
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Reset stepping state when script exits with uncaught exception.
+ if (isolate->debugger()->IsDebuggerActive()) {
+ isolate->debug()->ClearStepping();
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
return Handle<Object>();
} else {
isolate->clear_pending_message();
@@ -158,7 +164,8 @@ Handle<Object> Execution::Call(Handle<Object> callable,
bool* pending_exception,
bool convert_receiver)
{
- return Call(callable, receiver, argc, argv, pending_exception, convert_receiver, Handle<Object>());
+ return Call(callable, receiver, argc, argv, pending_exception,
+ convert_receiver, Handle<Object>());
}
Handle<Object> Execution::Call(Handle<Object> callable,
@@ -180,10 +187,10 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (convert_receiver && !receiver->IsJSReceiver() &&
!func->shared()->native() && func->shared()->is_classic_mode()) {
if (receiver->IsUndefined() || receiver->IsNull()) {
- Object* global = func->context()->global()->global_receiver();
+ Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
- // In that case, don't rewrite.
- // (FWIW, the same holds for GetIsolate()->global()->global_receiver().)
+ // In that case, don't rewrite. (FWIW, the same holds for
+ // GetIsolate()->global_object()->global_receiver().)
if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
} else {
receiver = ToObject(receiver, pending_exception);
@@ -199,7 +206,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global(), argc, argv,
+ return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
pending_exception, Handle<Object>());
}
@@ -261,7 +268,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_function_delegate());
+ isolate->native_context()->call_as_function_delegate());
}
return factory->undefined_value();
@@ -285,7 +292,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_function_delegate());
+ isolate->native_context()->call_as_function_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -318,7 +325,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_constructor_delegate());
+ isolate->native_context()->call_as_constructor_delegate());
}
return isolate->factory()->undefined_value();
@@ -346,7 +353,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
- isolate->global_context()->call_as_constructor_delegate());
+ isolate->native_context()->call_as_constructor_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -461,6 +468,25 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
+void StackGuard::RequestCodeReadyEvent() {
+ ASSERT(FLAG_parallel_recompilation);
+ if (ExecutionAccess::TryLock(isolate_)) {
+ thread_local_.interrupt_flags_ |= CODE_READY;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+ ExecutionAccess::Unlock(isolate_);
+ }
+}
+
+
+bool StackGuard::IsCodeReadyEvent() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
+}
+
+
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
@@ -676,7 +702,7 @@ Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags,
bool* exc) {
Handle<JSFunction> function = Handle<JSFunction>(
- pattern->GetIsolate()->global_context()->regexp_function());
+ pattern->GetIsolate()->native_context()->regexp_function());
Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
function, pattern, flags, exc);
if (*exc) return Handle<JSRegExp>();
@@ -722,7 +748,7 @@ Handle<JSFunction> Execution::InstantiateFunction(
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
Object* elm =
- isolate->global_context()->function_cache()->
+ isolate->native_context()->function_cache()->
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
@@ -847,6 +873,11 @@ Object* Execution::DebugBreakHelper() {
return isolate->heap()->undefined_value();
}
+ // Ignore debug break if debugger is not active.
+ if (!isolate->debugger()->IsDebuggerActive()) {
+ return isolate->heap()->undefined_value();
+ }
+
StackLimitCheck check(isolate);
if (check.HasOverflowed()) {
return isolate->heap()->undefined_value();
@@ -861,7 +892,7 @@ Object* Execution::DebugBreakHelper() {
if (JSFunction::cast(fun)->IsBuiltin()) {
return isolate->heap()->undefined_value();
}
- GlobalObject* global = JSFunction::cast(fun)->context()->global();
+ GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (isolate->debug()->IsDebugGlobal(global)) {
return isolate->heap()->undefined_value();
@@ -921,6 +952,17 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
+ if (stack_guard->IsCodeReadyEvent()) {
+ ASSERT(FLAG_parallel_recompilation);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** CODE_READY event received.\n");
+ }
+ stack_guard->Continue(CODE_READY);
+ }
+ if (!stack_guard->IsTerminateExecution()) {
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||
diff --git a/src/3rdparty/v8/src/execution.h b/src/3rdparty/v8/src/execution.h
index c33a675..90219f5 100644
--- a/src/3rdparty/v8/src/execution.h
+++ b/src/3rdparty/v8/src/execution.h
@@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6
+ GC_REQUEST = 1 << 6,
+ CODE_READY = 1 << 7
};
@@ -203,6 +204,8 @@ class StackGuard {
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
+ bool IsCodeReadyEvent();
+ void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();
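
Editor's note: CODE_READY takes the next free power-of-two bit, so all pending interrupt requests share one flags word and can be set, tested, and cleared independently, which is what RequestCodeReadyEvent, IsCodeReadyEvent, and Continue do under the execution lock. A minimal sketch of the bit discipline (lower bits and locking omitted):

```cpp
// Sketch of the InterruptFlag bit set: each request is one bit, so several
// pending interrupts coexist in a single word. Values mirror the enum above.
#include <cassert>

enum InterruptFlag {
  // INTERRUPT, DEBUGBREAK, DEBUGCOMMAND occupy the lower bits (omitted).
  PREEMPT = 1 << 3, TERMINATE = 1 << 4, RUNTIME_PROFILER_TICK = 1 << 5,
  GC_REQUEST = 1 << 6, CODE_READY = 1 << 7
};

int main() {
  int flags = 0;
  flags |= GC_REQUEST;                // RequestGC-style set
  flags |= CODE_READY;                // RequestCodeReadyEvent-style set
  assert((flags & CODE_READY) != 0);  // IsCodeReadyEvent-style test
  flags &= ~CODE_READY;               // Continue(CODE_READY)-style clear
  assert((flags & CODE_READY) == 0);
  assert((flags & GC_REQUEST) != 0);  // other pending bits survive
  return 0;
}
```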
diff --git a/src/3rdparty/v8/src/extensions/gc-extension.cc b/src/3rdparty/v8/src/extensions/gc-extension.cc
index f921552..813b921 100644
--- a/src/3rdparty/v8/src/extensions/gc-extension.cc
+++ b/src/3rdparty/v8/src/extensions/gc-extension.cc
@@ -40,7 +40,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+ if (args[0]->BooleanValue()) {
+ HEAP->CollectGarbage(NEW_SPACE, "gc extension");
+ } else {
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+ }
return v8::Undefined();
}
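
Editor's note: with this change a script can request a cheap new-space scavenge instead of a full collection by passing a truthy first argument, e.g. gc(true) versus gc(). A tiny sketch of the dispatch, with a string standing in for the two Heap calls (the function name is the editor's):

```cpp
// Sketch of the argument dispatch added to GCExtension::GC.
#include <cassert>
#include <string>

static std::string CollectGarbage(bool scavenge_only) {
  // Stand-ins for HEAP->CollectGarbage(NEW_SPACE, ...) and
  // HEAP->CollectAllGarbage(Heap::kNoGCFlags, ...).
  return scavenge_only ? "scavenge" : "full";
}

int main() {
  assert(CollectGarbage(true) == "scavenge");  // gc(true) in script
  assert(CollectGarbage(false) == "full");     // gc() in script
  return 0;
}
```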
diff --git a/src/3rdparty/v8/src/extensions/statistics-extension.cc b/src/3rdparty/v8/src/extensions/statistics-extension.cc
new file mode 100644
index 0000000..7ae090c
--- /dev/null
+++ b/src/3rdparty/v8/src/extensions/statistics-extension.cc
@@ -0,0 +1,153 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "statistics-extension.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const StatisticsExtension::kSource =
+ "native function getV8Statistics();";
+
+
+v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
+ return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
+}
+
+
+static void AddCounter(v8::Local<v8::Object> object,
+ StatsCounter* counter,
+ const char* name) {
+ if (counter->Enabled()) {
+ object->Set(v8::String::New(name),
+ v8::Number::New(*counter->GetInternalPointer()));
+ }
+}
+
+static void AddNumber(v8::Local<v8::Object> object,
+ intptr_t value,
+ const char* name) {
+ object->Set(v8::String::New(name),
+ v8::Number::New(static_cast<double>(value)));
+}
+
+
+v8::Handle<v8::Value> StatisticsExtension::GetCounters(
+ const v8::Arguments& args) {
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+
+ if (args.Length() > 0) { // GC if first argument evaluates to true.
+ if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
+ }
+ }
+
+ Counters* counters = isolate->counters();
+ v8::Local<v8::Object> result = v8::Object::New();
+
+#define ADD_COUNTER(name, caption) \
+ AddCounter(result, counters->name(), #name);
+
+ STATS_COUNTER_LIST_1(ADD_COUNTER)
+ STATS_COUNTER_LIST_2(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
+ AddCounter(result, counters->size_of_##name(), "size_of_" #name);
+
+ INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
+ "count_of_CODE_TYPE_" #name); \
+ AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
+ "size_of_CODE_TYPE_" #name);
+
+ CODE_KIND_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+#define ADD_COUNTER(name) \
+ AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
+ "count_of_FIXED_ARRAY_" #name); \
+ AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
+ "size_of_FIXED_ARRAY_" #name);
+
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
+#undef ADD_COUNTER
+
+ AddNumber(result, isolate->memory_allocator()->Size(),
+ "total_committed_bytes");
+ AddNumber(result, heap->new_space()->Size(),
+ "new_space_live_bytes");
+ AddNumber(result, heap->new_space()->Available(),
+ "new_space_available_bytes");
+ AddNumber(result, heap->new_space()->CommittedMemory(),
+ "new_space_commited_bytes");
+ AddNumber(result, heap->old_pointer_space()->Size(),
+ "old_pointer_space_live_bytes");
+ AddNumber(result, heap->old_pointer_space()->Available(),
+ "old_pointer_space_available_bytes");
+ AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
+ "old_pointer_space_commited_bytes");
+ AddNumber(result, heap->old_data_space()->Size(),
+ "old_data_space_live_bytes");
+ AddNumber(result, heap->old_data_space()->Available(),
+ "old_data_space_available_bytes");
+ AddNumber(result, heap->old_data_space()->CommittedMemory(),
+ "old_data_space_commited_bytes");
+ AddNumber(result, heap->code_space()->Size(),
+ "code_space_live_bytes");
+ AddNumber(result, heap->code_space()->Available(),
+ "code_space_available_bytes");
+ AddNumber(result, heap->code_space()->CommittedMemory(),
+ "code_space_commited_bytes");
+ AddNumber(result, heap->cell_space()->Size(),
+ "cell_space_live_bytes");
+ AddNumber(result, heap->cell_space()->Available(),
+ "cell_space_available_bytes");
+ AddNumber(result, heap->cell_space()->CommittedMemory(),
+ "cell_space_commited_bytes");
+ AddNumber(result, heap->lo_space()->Size(),
+ "lo_space_live_bytes");
+ AddNumber(result, heap->lo_space()->Available(),
+ "lo_space_available_bytes");
+ AddNumber(result, heap->lo_space()->CommittedMemory(),
+ "lo_space_commited_bytes");
+ AddNumber(result, heap->amount_of_external_allocated_memory(),
+ "amount_of_external_allocated_memory");
+ return result;
+}
+
+
+void StatisticsExtension::Register() {
+ static StatisticsExtension statistics_extension;
+ static v8::DeclareExtension declaration(&statistics_extension);
+}
+
+} } // namespace v8::internal
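
Editor's note: GetCounters builds one flat name-to-number object: the re-#defined ADD_COUNTER macro walks each counter list, disabled counters are silently skipped, and the per-space heap figures are appended as plain numbers. A sketch of that assembly, with std::map standing in for the v8::Object result:

```cpp
// Sketch of how GetCounters flattens counters and heap numbers into a
// single name->value object.
#include <cassert>
#include <map>
#include <string>

struct StatsCounter {
  bool enabled;
  int value;
};

static void AddCounter(std::map<std::string, double>* object,
                       const StatsCounter& counter, const char* name) {
  // Disabled counters are skipped, exactly as in the extension.
  if (counter.enabled) (*object)[name] = counter.value;
}

static void AddNumber(std::map<std::string, double>* object,
                      long value, const char* name) {
  (*object)[name] = static_cast<double>(value);
}

int main() {
  std::map<std::string, double> result;
  AddCounter(&result, {true, 42}, "stack_interrupts");
  AddCounter(&result, {false, 7}, "disabled_counter");
  AddNumber(&result, 1 << 20, "new_space_live_bytes");
  assert(result.size() == 2);  // the disabled counter was dropped
  assert(result["stack_interrupts"] == 42);
  return 0;
}
```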
diff --git a/src/3rdparty/v8/src/extensions/statistics-extension.h b/src/3rdparty/v8/src/extensions/statistics-extension.h
new file mode 100644
index 0000000..433c4cf
--- /dev/null
+++ b/src/3rdparty/v8/src/extensions/statistics-extension.h
@@ -0,0 +1,49 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class StatisticsExtension : public v8::Extension {
+ public:
+ StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
+ static void Register();
+ private:
+ static const char* const kSource;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
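The header above gives the extension's whole surface: a JavaScript source string (kSource) whose `native function` declarations are resolved through GetNativeFunction, plus a Register() hook. Below is a minimal sketch of the two sides, hedged because kSource itself lives in statistics-extension.cc and this diff shows that file only in part; the native function name in the comment is an assumption.

    // Sketch of the .cc side (only partly visible in this diff). The
    // function name is an assumption: it is whatever kSource declares,
    // e.g. "native function getV8Statistics();".
    v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
        v8::Handle<v8::String> name) {
      // kSource declares a single native function, so no dispatch on
      // 'name' is needed here.
      return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
    }

    // Embedder side: register the extension once at startup, then opt in
    // per context by naming it in an ExtensionConfiguration.
    void EnableStatistics() {
      v8::internal::StatisticsExtension::Register();
      const char* names[] = { "v8/statistics" };
      v8::ExtensionConfiguration extensions(1, names);
      v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
      // Scripts run in this context can now call the declared native
      // function and read the counters collected by GetCounters above.
    }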
diff --git a/src/3rdparty/v8/src/factory.cc b/src/3rdparty/v8/src/factory.cc
index 64b26a4..703251f 100644
--- a/src/3rdparty/v8/src/factory.cc
+++ b/src/3rdparty/v8/src/factory.cc
@@ -34,6 +34,7 @@
#include "macro-assembler.h"
#include "objects.h"
#include "objects-visiting.h"
+#include "platform.h"
#include "scopeinfo.h"
namespace v8 {
@@ -111,10 +112,11 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
}
-Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
+ int slack) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors),
+ DescriptorArray::Allocate(number_of_descriptors, slack),
DescriptorArray);
}
@@ -283,19 +285,27 @@ Handle<String> Factory::NewExternalStringFromTwoByte(
}
-Handle<Context> Factory::NewGlobalContext() {
+Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateGlobalContext(),
+ isolate()->heap()->AllocateNativeContext(),
Context);
}
-Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
+Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
+ isolate()->heap()->AllocateGlobalContext(*function, *scope_info),
+ Context);
+}
+
+
+Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateModuleContext(*scope_info),
Context);
}
@@ -464,14 +474,15 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
-Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
+Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(
+ isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map);
}
Handle<Map> Factory::CopyMap(Handle<Map> src,
int extra_inobject_properties) {
- Handle<Map> copy = CopyMapDropDescriptors(src);
+ Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src);
// Check that we do not overflow the instance size when adding the
// extra inobject properties.
int instance_size_delta = extra_inobject_properties * kPointerSize;
@@ -494,8 +505,8 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
}
-Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
- CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
+Handle<Map> Factory::CopyMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map);
}
@@ -550,18 +561,27 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
result->set_context(*context);
- if (!function_info->bound()) {
+
+ int index = function_info->SearchOptimizedCodeMap(context->native_context());
+ if (!function_info->bound() && index < 0) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
- // Store the object, regexp and array functions in the literals
- // array prefix. These functions will be used when creating
- // object, regexp and array literals in this function.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
+ // Store the native context in the literals array prefix. This
+ // context will be used when creating object, regexp and array
+ // literals in this function.
+ literals->set(JSFunction::kLiteralNativeContextIndex,
+ context->native_context());
}
result->set_literals(*literals);
}
+
+ if (index > 0) {
+ // Caching of optimized code enabled and optimized code found.
+ function_info->InstallFromOptimizedCodeMap(*result, index);
+ return result;
+ }
+
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
@@ -675,6 +695,43 @@ Handle<Object> Factory::NewError(const char* type,
}
+Handle<String> Factory::EmergencyNewError(const char* type,
+ Handle<JSArray> args) {
+ const int kBufferSize = 1000;
+ char buffer[kBufferSize];
+ size_t space = kBufferSize;
+ char* p = &buffer[0];
+
+ Vector<char> v(buffer, kBufferSize);
+ OS::StrNCpy(v, type, space);
+ space -= Min(space, strlen(type));
+ p = &buffer[kBufferSize] - space;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
+ if (space > 0) {
+ *p++ = ' ';
+ space--;
+ if (space > 0) {
+ MaybeObject* maybe_arg = args->GetElement(i);
+ Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
+ const char* arg = *arg_str->ToCString();
+ Vector<char> v2(p, static_cast<int>(space));
+ OS::StrNCpy(v2, arg, space);
+ space -= Min(space, strlen(arg));
+ p = &buffer[kBufferSize] - space;
+ }
+ }
+ }
+ if (space > 0) {
+ *p = '\0';
+ } else {
+ buffer[kBufferSize - 1] = '\0';
+ }
+ Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED);
+ return error_string;
+}
+
+
Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
@@ -683,8 +740,9 @@ Handle<Object> Factory::NewError(const char* maker,
isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
- if (!fun_obj->IsJSFunction())
- return undefined_value();
+ if (!fun_obj->IsJSFunction()) {
+ return EmergencyNewError(type, args);
+ }
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
Handle<Object> argv[] = { type_obj, args };
@@ -775,7 +833,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type,
instance_size,
- FAST_SMI_ONLY_ELEMENTS);
+ GetInitialFastElementsKind());
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@@ -837,97 +895,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-MUST_USE_RESULT static inline MaybeObject* DoCopyInsert(
- DescriptorArray* array,
- String* key,
- Object* value,
- PropertyAttributes attributes) {
- CallbacksDescriptor desc(key, value, attributes);
- MaybeObject* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
- return obj;
-}
-
-
-// Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(isolate(),
- DoCopyInsert(*array, *key, *value, attributes),
- DescriptorArray);
-}
-
-
Handle<String> Factory::SymbolFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->LookupSymbol(*value), String);
}
-Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors) {
- v8::NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
- Handle<DescriptorArray> result =
- NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
-
- // Number of descriptors added to the result so far.
- int descriptor_count = 0;
-
- // Ensure that marking will not progress and change color of objects.
- DescriptorArray::WhitenessWitness witness(*result);
-
- // Copy the descriptors from the array.
- for (int i = 0; i < array->number_of_descriptors(); i++) {
- if (!array->IsNullDescriptor(i)) {
- DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
- }
- }
-
- // Number of duplicates detected.
- int duplicates = 0;
-
- // Fill in new callback descriptors. Process the callbacks from
- // back to front so that the last callback with a given name takes
- // precedence over previously added callbacks with that name.
- for (int i = nof_callbacks - 1; i >= 0; i--) {
- Handle<AccessorInfo> entry =
- Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
- // Ensure the key is a symbol before writing into the instance descriptor.
- Handle<String> key =
- SymbolFromString(Handle<String>(String::cast(entry->name())));
- // Check if a descriptor with this name already exists before writing.
- if (result->LinearSearch(*key, descriptor_count) ==
- DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- result->Set(descriptor_count, &desc, witness);
- descriptor_count++;
- } else {
- duplicates++;
- }
- }
-
- // If duplicates were detected, allocate a result of the right size
- // and transfer the elements.
- if (duplicates > 0) {
- int number_of_descriptors = result->number_of_descriptors() - duplicates;
- Handle<DescriptorArray> new_result =
- NewDescriptorArray(number_of_descriptors);
- for (int i = 0; i < number_of_descriptors; i++) {
- DescriptorArray::CopyFrom(new_result, i, result, i, witness);
- }
- result = new_result;
- }
-
- // Sort the result before returning.
- result->Sort(witness);
- return result;
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
@@ -936,10 +909,11 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
-Handle<JSModule> Factory::NewJSModule() {
+Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
+ Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSModule(), JSModule);
+ isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
}
@@ -1013,10 +987,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
+ uint32_t length,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
- array->EnsureCanContainElements(*elements, mode));
+ array->EnsureCanContainElements(*elements, length, mode));
}
@@ -1045,7 +1020,7 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
object->SetIdentityHash(hash, ALLOW_CREATION));
@@ -1145,7 +1120,7 @@ Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Object> prototype) {
Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
- fun->set_context(isolate()->context()->global_context());
+ fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1171,7 +1146,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
LanguageMode language_mode) {
Handle<JSFunction> fun =
NewFunctionWithoutPrototypeHelper(name, language_mode);
- fun->set_context(isolate()->context()->global_context());
+ fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1182,8 +1157,8 @@ Handle<Object> Factory::ToObject(Handle<Object> object) {
Handle<Object> Factory::ToObject(Handle<Object> object,
- Handle<Context> global_context) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
+ Handle<Context> native_context) {
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object);
}
@@ -1333,20 +1308,31 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
- Handle<DescriptorArray> array =
- Handle<DescriptorArray>(map->instance_descriptors());
+ int max_number_of_additional_properties = 0;
+ FunctionTemplateInfo* info = *obj;
+ while (true) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props);
+ NeanderArray props_array(props_handle);
+ max_number_of_additional_properties += props_array.length();
+ }
+ Object* parent = info->parent_template();
+ if (parent->IsUndefined()) break;
+ info = FunctionTemplateInfo::cast(parent);
+ }
+
+ Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- array = CopyAppendCallbackDescriptors(array, props);
+ Map::AppendCallbackDescriptors(map, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
- if (!array->IsEmpty()) {
- map->set_instance_descriptors(*array);
- }
ASSERT(result->shared()->IsApiFunction());
return result;
@@ -1383,7 +1369,7 @@ Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys) {
if (context->map_cache()->IsUndefined()) {
- // Allocate the new map cache for the global context.
+ // Allocate the new map cache for the native context.
Handle<MapCache> new_cache = NewMapCache(24);
context->set_map_cache(*new_cache);
}
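The CreateApiFunction hunk above replaces repeated copy-and-append of descriptor arrays with a two-pass scheme: walk the template chain once to count the inherited accessors, reserve that much slack via Map::EnsureDescriptorSlack, then append in place. A rough stand-in sketch of the same pattern using std::vector rather than V8's DescriptorArray:

    #include <cstddef>
    #include <vector>

    // Stand-in types; 'accessors' plays property_accessors() and 'parent'
    // plays parent_template(). Counting first turns one reallocation per
    // template level into a single reservation plus in-place appends.
    struct TemplateNode {
      std::vector<int> accessors;
      const TemplateNode* parent;
    };

    void CollectAccessors(const TemplateNode* leaf, std::vector<int>* out) {
      std::size_t extra = 0;
      for (const TemplateNode* n = leaf; n != NULL; n = n->parent)
        extra += n->accessors.size();            // first pass: count only
      out->reserve(out->size() + extra);         // reserve slack once
      for (const TemplateNode* n = leaf; n != NULL; n = n->parent)
        out->insert(out->end(), n->accessors.begin(), n->accessors.end());
    }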
diff --git a/src/3rdparty/v8/src/factory.h b/src/3rdparty/v8/src/factory.h
index 06aad1b..51065aa 100644
--- a/src/3rdparty/v8/src/factory.h
+++ b/src/3rdparty/v8/src/factory.h
@@ -66,7 +66,8 @@ class Factory {
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
- Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
+ int slack = 0);
Handle<DeoptimizationInputData> NewDeoptimizationInputData(
int deopt_entry_count,
PretenureFlag pretenure);
@@ -160,12 +161,15 @@ class Factory {
const ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
- Handle<Context> NewGlobalContext();
+ Handle<Context> NewNativeContext();
- // Create a module context.
- Handle<Context> NewModuleContext(Handle<Context> previous,
+ // Create a global context.
+ Handle<Context> NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info);
+ // Create a module context.
+ Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
+
// Create a function context.
Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -216,19 +220,19 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- Handle<Map> NewMap(InstanceType type,
- int instance_size,
- ElementsKind elements_kind = FAST_ELEMENTS);
+ Handle<Map> NewMap(
+ InstanceType type,
+ int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
- Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
+ Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
-
- Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ Handle<Map> CopyMap(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind elements_kind);
@@ -266,16 +270,18 @@ class Factory {
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS modules are pretenured.
- Handle<JSModule> NewJSModule();
+ Handle<JSModule> NewJSModule(Handle<Context> context,
+ Handle<ScopeInfo> scope_info);
// JS arrays are pretenured when allocated by the parser.
- Handle<JSArray> NewJSArray(int capacity,
- ElementsKind elements_kind = FAST_ELEMENTS,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(
+ int capacity,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = FAST_ELEMENTS,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED);
void SetElementsCapacityAndLength(Handle<JSArray> array,
@@ -287,6 +293,7 @@ class Factory {
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
+ uint32_t length,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@@ -295,7 +302,7 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Object* hash);
+ void SetIdentityHash(Handle<JSObject> object, Smi* hash);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -329,12 +336,13 @@ class Factory {
Handle<Object> ToObject(Handle<Object> object);
Handle<Object> ToObject(Handle<Object> object,
- Handle<Context> global_context);
+ Handle<Context> native_context);
// Interface for creating error objects.
Handle<Object> NewError(const char* maker, const char* type,
Handle<JSArray> args);
+ Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
Handle<Object> NewError(const char* maker, const char* type,
Vector< Handle<Object> > args);
Handle<Object> NewError(const char* type,
@@ -382,12 +390,6 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code);
- Handle<DescriptorArray> CopyAppendForeignDescriptor(
- Handle<DescriptorArray> array,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
@@ -460,7 +462,7 @@ class Factory {
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
- // Return a map using the map cache in the global context.
+ // Return a map using the map cache in the native context.
// The key is an ordered set of property names.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys);
@@ -499,14 +501,10 @@ class Factory {
Handle<String> name,
LanguageMode language_mode);
- Handle<DescriptorArray> CopyAppendCallbackDescriptors(
- Handle<DescriptorArray> array,
- Handle<Object> descriptors);
-
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
- // Update the map cache in the global context with (keys, map)
+ // Update the map cache in the native context with (keys, map)
Handle<MapCache> AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map);
diff --git a/src/3rdparty/v8/src/flag-definitions.h b/src/3rdparty/v8/src/flag-definitions.h
index 31c2a3a..96d03fa 100644
--- a/src/3rdparty/v8/src/flag-definitions.h
+++ b/src/3rdparty/v8/src/flag-definitions.h
@@ -132,7 +132,9 @@ public:
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es52_globals, false,
+DEFINE_bool(es5_readonly, true,
+ "activate correct semantics for inheriting readonliness")
+DEFINE_bool(es52_globals, true,
"activate new semantics for global var declarations")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
@@ -142,14 +144,19 @@ DEFINE_bool(harmony_modules, false,
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony_observation, false,
+ "enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
+DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony_modules, harmony_scoping)
+DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
+DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@@ -197,18 +204,35 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
-DEFINE_bool(array_index_dehoisting, false,
+DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
+DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
+DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
+DEFINE_bool(lookup_sample_by_shared, true,
+ "when picking a function to optimize, watch for shared function "
+ "info, not JSFunction itself")
+DEFINE_bool(cache_optimized_code, true,
+ "cache optimized code for closures")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
+DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
+DEFINE_bool(opt_safe_uint32_operations, true,
+ "allow uint32 values on optimize frames if they are used only in"
+ "safe operations")
+
+DEFINE_bool(parallel_recompilation, false,
+ "optimizing hot functions asynchronously on a separate thread")
+DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
+DEFINE_int(parallel_recompilation_queue_length, 2,
+ "the length of the parallel compilation queue")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -225,7 +249,8 @@ DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
-DEFINE_int(interrupt_budget, 5900,
+ // 0x1700 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow optimization")
@@ -260,9 +285,18 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies "
- "enabling ARMv7 instructions (ARM only)")
+ "enabling ARMv7 and VFP2 instructions (ARM only)")
+DEFINE_bool(enable_vfp2, true,
+ "enable use of VFP2 instructions if available")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_sudiv, true,
+ "enable use of SDIV and UDIV instructions if available (ARM only)")
+DEFINE_bool(enable_movw_movt, false,
+ "enable loading 32-bit constant by means of movw/movt "
+ "instruction pairs (ARM only)")
+DEFINE_bool(enable_unaligned_accesses, true,
+ "enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
@@ -304,8 +338,8 @@ DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
-DEFINE_bool(trace_bailout, false,
- "print reasons for falling back to using the classic V8 backend")
+DEFINE_int(max_opt_count, 10,
+ "maximum number of optimization attempts before giving up.")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -346,27 +380,39 @@ DEFINE_bool(trace_gc, false,
DEFINE_bool(trace_gc_nvp, false,
"print one detailed trace line in name=value format "
"after each garbage collection")
+DEFINE_bool(trace_gc_ignore_scavenger, false,
+ "do not print trace line after scavenger collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
+DEFINE_bool(trace_external_memory, false,
+ "print amount of external allocated memory after each time "
+ "it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
- "flush code that we expect not to use again before full gc")
+ "flush code that we expect not to use again (during full gc)")
+DEFINE_bool(flush_code_incrementally, false,
+ "flush code that we expect not to use again (incrementally)")
+DEFINE_bool(age_code, false,
+ "track un-executed functions to age code and flush only "
+ "old code")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
+DEFINE_bool(track_gc_object_stats, false,
+ "track object counts and memory usage")
+#ifdef VERIFY_HEAP
+DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+#endif
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
-
-DEFINE_bool(send_idle_notification, false,
- "Send idle notifcation between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
@@ -388,6 +434,8 @@ DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_bool(compact_code_space, true,
"Compact code space on full non-incremental collections")
+DEFINE_bool(incremental_code_compaction, true,
+ "Compact code space on full incremental collections")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
@@ -400,6 +448,7 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
@@ -444,6 +493,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
"file in which to serialize heap")
#endif
+// mksnapshot.cc
+DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
+ " the snapshot (mksnapshot only)")
+
//
// Dev shell flags
//
@@ -529,7 +582,8 @@ DEFINE_bool(gc_greedy, false, "perform GC prior to some allocations")
DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+DEFINE_bool(verify_native_context_separation, false,
+ "verify that code holds on to at most one native context after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
@@ -602,6 +656,8 @@ DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
+ "Specify the name of the file for fake gc mmap used in ll_prof")
//
// Disassembler only flags
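Note the implication chain added above: --harmony now switches on --harmony_observation, which in turn requires --harmony_collections. A sketch of how such implications resolve, assuming the usual expansion of DEFINE_implication (its definition sits outside this hunk); a single top-down pass suffices because each implication is listed before the ones it can trigger.

    // Stand-ins for the flag variables generated from the DEFINE_bool
    // lines above. Assumed expansion in "implication" mode:
    //   DEFINE_implication(a, b)  =>  if (FLAG_a) FLAG_b = true;
    static bool FLAG_harmony = false;
    static bool FLAG_harmony_observation = false;
    static bool FLAG_harmony_collections = false;

    static void EnforceFlagImplicationsSketch() {
      // harmony => observation, then observation => collections.
      if (FLAG_harmony) FLAG_harmony_observation = true;
      if (FLAG_harmony_observation) FLAG_harmony_collections = true;
    }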
diff --git a/src/3rdparty/v8/src/flags.cc b/src/3rdparty/v8/src/flags.cc
index 5720cbd..bca0eff 100644
--- a/src/3rdparty/v8/src/flags.cc
+++ b/src/3rdparty/v8/src/flags.cc
@@ -31,7 +31,7 @@
#include "v8.h"
#include "platform.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-stream.h"
@@ -343,6 +343,7 @@ static Flag* FindFlag(const char* name) {
int FlagList::SetFlagsFromCommandLine(int* argc,
char** argv,
bool remove_flags) {
+ int return_code = 0;
// parse arguments
for (int i = 1; i < *argc;) {
int j = i; // j > 0
@@ -368,7 +369,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
} else {
fprintf(stderr, "Error: unrecognized flag %s\n"
"Try --help for options\n", arg);
- return j;
+ return_code = j;
+ break;
}
}
@@ -382,7 +384,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: missing value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
- return j;
+ return_code = j;
+ break;
}
}
@@ -424,7 +427,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
- return j;
+ return_code = j;
+ break;
}
// remove the flag & value from the command
@@ -451,7 +455,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
exit(0);
}
// parsed all flags successfully
- return 0;
+ return return_code;
}
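The error-handling rework above no longer returns from deep inside the parse loop. On the first bad argument it records that argument's index, breaks out, and still runs the epilogue (argv compaction under remove_flags, the --help check) before returning the recorded index. An illustrative fragment, with hypothetical argv values and FlagList as declared in src/flags.h:

    #include "flags.h"  // V8-internal; declares v8::internal::FlagList

    int ParseSketch() {
      char arg0[] = "d8";
      char arg1[] = "--bogus_flag";   // hypothetical, unrecognized
      char arg2[] = "--use_strict";
      char* argv[] = { arg0, arg1, arg2 };
      int argc = 3;
      int bad =
          v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
      if (bad != 0) {
        // An error was already printed to stderr; 'bad' is the index of
        // the first argument that failed to parse. Flags parsed before it
        // were applied and, with remove_flags == true, stripped from argv.
      }
      return bad;  // 0 means every flag parsed cleanly
    }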
diff --git a/src/3rdparty/v8/src/frames.cc b/src/3rdparty/v8/src/frames.cc
index e265341..6342852 100644
--- a/src/3rdparty/v8/src/frames.cc
+++ b/src/3rdparty/v8/src/frames.cc
@@ -469,8 +469,22 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
}
+Address StackFrame::UnpaddedFP() const {
+#if defined(V8_TARGET_ARCH_IA32)
+ if (!is_optimized()) return fp();
+ int32_t alignment_state = Memory::int32_at(
+ fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
+
+ return (alignment_state == kAlignmentPaddingPushed) ?
+ (fp() + kPointerSize) : fp();
+#else
+ return fp();
+#endif
+}
+
+
Code* EntryFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_entry_code();
+ return HEAP->js_entry_code();
}
@@ -493,7 +507,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->raw_unchecked_js_construct_entry_code();
+ return HEAP->js_construct_entry_code();
}
@@ -747,7 +761,7 @@ void JavaScriptFrame::PrintTop(FILE* file,
while (!it.done()) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
- if (frame->IsConstructor()) PrintF(file, "new ");
+ if (frame->IsConstructor()) FPrintF(file, "new ");
// function name
Object* maybe_fun = frame->function();
if (maybe_fun->IsJSFunction()) {
@@ -773,12 +787,12 @@ void JavaScriptFrame::PrintTop(FILE* file,
SmartArrayPointer<char> c_script_name =
script_name->ToCString(DISALLOW_NULLS,
ROBUST_STRING_TRAVERSAL);
- PrintF(file, " at %s:%d", *c_script_name, line);
+ FPrintF(file, " at %s:%d", *c_script_name, line);
} else {
- PrintF(file, "at <unknown>:%d", line);
+ FPrintF(file, " at <unknown>:%d", line);
}
} else {
- PrintF(file, " at <unknown>:<unknown>");
+ FPrintF(file, " at <unknown>:<unknown>");
}
}
} else {
@@ -789,14 +803,14 @@ void JavaScriptFrame::PrintTop(FILE* file,
// function arguments
// (we are intentionally only printing the actually
// supplied parameters, not all parameters required)
- PrintF(file, "(this=");
+ FPrintF(file, "(this=");
frame->receiver()->ShortPrint(file);
const int length = frame->ComputeParametersCount();
for (int i = 0; i < length; i++) {
- PrintF(file, ", ");
+ FPrintF(file, ", ");
frame->GetParameter(i)->ShortPrint(file);
}
- PrintF(file, ")");
+ FPrintF(file, ")");
}
break;
}
@@ -818,12 +832,23 @@ void FrameSummary::Print() {
}
+JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
+ int literal_id) {
+ if (literal_id == Translation::kSelfLiteralId) {
+ return JSFunction::cast(function());
+ }
+
+ return JSFunction::cast(literal_array->get(literal_id));
+}
+
+
void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+ FixedArray* literal_array = data->LiteralArray();
// BUG(3243555): Since we don't have a lazy-deopt registered at
// throw-statements, we can't use the translation at the call-site of
@@ -850,11 +875,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::JS_FRAME) {
i--;
- int ast_id = it.Next();
- int function_id = it.Next();
+ BailoutId ast_id = BailoutId(it.Next());
+ JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
// The translation commands are ordered and the receiver is always
// at the first position. Since we are always at a call when we need
@@ -961,6 +984,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+ FixedArray* literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
@@ -976,10 +1000,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
if (opcode == Translation::JS_FRAME) {
jsframe_count--;
it.Next(); // Skip ast id.
- int function_id = it.Next();
+ JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
- JSFunction* function =
- JSFunction::cast(data->LiteralArray()->get(function_id));
functions->Add(function);
} else {
// Skip over operands to advance to the next opcode.
@@ -1394,11 +1416,11 @@ class field##_Wrapper : public ZoneObject { \
STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
#undef DEFINE_WRAPPER
-static StackFrame* AllocateFrameCopy(StackFrame* frame) {
+static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: { \
field##_Wrapper* wrapper = \
- new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
+ new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
return &wrapper->frame_; \
}
@@ -1410,11 +1432,11 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame) {
return NULL;
}
-Vector<StackFrame*> CreateStackMap() {
- ZoneList<StackFrame*> list(10);
+Vector<StackFrame*> CreateStackMap(Zone* zone) {
+ ZoneList<StackFrame*> list(10, zone);
for (StackFrameIterator it; !it.done(); it.Advance()) {
- StackFrame* frame = AllocateFrameCopy(it.frame());
- list.Add(frame);
+ StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
+ list.Add(frame, zone);
}
return list.ToVector();
}
diff --git a/src/3rdparty/v8/src/frames.h b/src/3rdparty/v8/src/frames.h
index 78cdd0c..30f7e1f 100644
--- a/src/3rdparty/v8/src/frames.h
+++ b/src/3rdparty/v8/src/frames.h
@@ -206,6 +206,11 @@ class StackFrame BASE_EMBEDDED {
Address fp() const { return state_.fp; }
Address caller_sp() const { return GetCallerStackPointer(); }
+ // If this frame is optimized and was dynamically aligned, return its old
+ // unaligned frame pointer. When the frame is deoptimized, its FP will
+ // shift up one word and become unaligned.
+ Address UnpaddedFP() const;
+
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
@@ -572,6 +577,8 @@ class OptimizedFrame : public JavaScriptFrame {
inline explicit OptimizedFrame(StackFrameIterator* iterator);
private:
+ JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
+
friend class StackFrameIterator;
};
@@ -888,7 +895,7 @@ class StackFrameLocator BASE_EMBEDDED {
// Reads all frames on the current stack and copies them into the current
// zone memory.
-Vector<StackFrame*> CreateStackMap();
+Vector<StackFrame*> CreateStackMap(Zone* zone);
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/full-codegen.cc b/src/3rdparty/v8/src/full-codegen.cc
index 9b1df4e..9592e0a 100644
--- a/src/3rdparty/v8/src/full-codegen.cc
+++ b/src/3rdparty/v8/src/full-codegen.cc
@@ -36,6 +36,7 @@
#include "prettyprinter.h"
#include "scopes.h"
#include "scopeinfo.h"
+#include "snapshot.h"
#include "stub-cache.h"
namespace v8 {
@@ -315,7 +316,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize) &&
- info->function()->scope()->AllowsLazyRecompilation());
+ info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
@@ -352,7 +353,7 @@ unsigned FullCodeGenerator::EmitStackCheckTable() {
unsigned length = stack_checks_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id);
+ __ dd(stack_checks_[i].id.ToInt());
__ dd(stack_checks_[i].pc_and_state);
}
return offset;
@@ -367,7 +368,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationOutputData> data = isolate()->factory()->
NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
- data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
+ data->SetAstId(i, bailout_entries_[i].id);
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
}
code->set_deoptimization_data(*data);
@@ -382,6 +383,20 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
}
+void FullCodeGenerator::Initialize() {
+ // The generation of debug code must match between the snapshot code and the
+ // code that is generated later. This is assumed by the debugger when it is
+ // calculating PC offsets after generating a debug version of code. Therefore
+ // we disable the production of debug code in the full compiler if we are
+ // either generating a snapshot or have booted from one.
+ generate_debug_code_ = FLAG_debug_code &&
+ !Serializer::enabled() &&
+ !Snapshot::HaveASnapshotToStartFrom();
+ masm_->set_emit_debug_code(generate_debug_code_);
+ masm_->set_predictable_code_size(true);
+}
+
+
void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
if (type_feedback_cells_.is_empty()) return;
int length = type_feedback_cells_.length();
@@ -389,7 +404,7 @@ void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
isolate()->factory()->NewFixedArray(array_size, TENURED));
for (int i = 0; i < length; i++) {
- cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
+ cache->SetAstId(i, type_feedback_cells_[i].ast_id);
cache->SetCell(i, *type_feedback_cells_[i].cell);
}
TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
@@ -420,7 +435,7 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
}
-void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!info_->HasDeoptimizationSupport()) return;
@@ -440,23 +455,23 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
}
}
#endif // DEBUG
- bailout_entries_.Add(entry);
+ bailout_entries_.Add(entry, zone());
}
void FullCodeGenerator::RecordTypeFeedbackCell(
- unsigned id, Handle<JSGlobalPropertyCell> cell) {
+ TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry);
+ type_feedback_cells_.Add(entry, zone());
}
-void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
+void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
- stack_checks_.Add(entry);
+ stack_checks_.Add(entry, zone());
}
@@ -570,7 +585,7 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
ZoneList<Handle<Object> >* saved_globals = globals_;
- ZoneList<Handle<Object> > inner_globals(10);
+ ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
AstVisitor::VisitDeclarations(declarations);
@@ -589,27 +604,20 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
- Handle<JSModule> instance = module->interface()->Instance();
- ASSERT(!instance.is_null());
-
// Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ Interface* interface = module->interface();
+ Handle<JSModule> instance = interface->Instance();
- // Generate code for module creation and linking.
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
- if (scope_info->HasContext()) {
- // Set up module context.
- __ Push(scope_info);
- __ Push(instance);
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- }
+ // Set up module context.
+ __ Push(instance);
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
Comment cmnt(masm_, "[ Declarations");
@@ -617,42 +625,21 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
}
scope_ = saved_scope;
- if (scope_info->HasContext()) {
- // Pop module context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- }
-
- // Populate module instance object.
- const PropertyAttributes attr =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
- for (Interface::Iterator it = module->interface()->iterator();
- !it.done(); it.Advance()) {
- if (it.interface()->IsModule()) {
- Handle<Object> value = it.interface()->Instance();
- ASSERT(!value.is_null());
- JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
- } else {
- // TODO(rossberg): set proper getters instead of undefined...
- // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
- Handle<Object> value(isolate()->heap()->undefined_value());
- JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
- }
- }
- USE(instance->PreventExtensions());
+ // Pop module context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
- // Noting to do.
+ // Nothing to do.
// The instance object is resolved statically through the module's interface.
}
void FullCodeGenerator::VisitModulePath(ModulePath* module) {
- // Noting to do.
+ // Nothing to do.
// The instance object is resolved statically through the module's interface.
}
@@ -822,7 +809,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
Expression* left = expr->left();
Expression* right = expr->right();
- int right_id = expr->RightId();
+ BailoutId right_id = expr->RightId();
Label done;
if (context()->IsTest()) {
@@ -916,25 +903,36 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Scope* saved_scope = scope();
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
- { Comment cmnt(masm_, "[ Extend block context");
- scope_ = stmt->scope();
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ scope_ = stmt->scope();
+ if (scope_->is_module_scope()) {
+ // If this block is a module body, then we have already allocated and
+ // initialized the declarations earlier. Just push the context.
+ ASSERT(!scope_->interface()->Instance().is_null());
+ __ Push(scope_->interface()->Instance());
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
+ StoreToFrameField(
+ StandardFrameConstants::kContextOffset, context_register());
+ } else {
+ { Comment cmnt(masm_, "[ Extend block context");
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots =
+ scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
+ PushFunctionArgumentForContextAllocation();
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
+ }
+
+ // Replace the context stored in the frame.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope_->declarations());
}
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
}
}
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
diff --git a/src/3rdparty/v8/src/full-codegen.h b/src/3rdparty/v8/src/full-codegen.h
index a89b446..972839e 100644
--- a/src/3rdparty/v8/src/full-codegen.h
+++ b/src/3rdparty/v8/src/full-codegen.h
@@ -86,11 +86,18 @@ class FullCodeGenerator: public AstVisitor {
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0),
- stack_checks_(2), // There's always at least one.
+ ? info->function()->ast_node_count() : 0,
+ info->zone()),
+ stack_checks_(2, info->zone()), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0),
- ic_total_count_(0) { }
+ ? info->function()->ast_node_count() : 0,
+ info->zone()),
+ ic_total_count_(0),
+ zone_(info->zone()) {
+ Initialize();
+ }
+
+ void Initialize();
static bool MakeCode(CompilationInfo* info);
@@ -108,6 +115,23 @@ class FullCodeGenerator: public AstVisitor {
return NULL;
}
+ Zone* zone() const { return zone_; }
+
+ static const int kMaxBackEdgeWeight = 127;
+
+#if V8_TARGET_ARCH_IA32
+ static const int kBackEdgeDistanceUnit = 100;
+#elif V8_TARGET_ARCH_X64
+ static const int kBackEdgeDistanceUnit = 162;
+#elif V8_TARGET_ARCH_ARM
+ static const int kBackEdgeDistanceUnit = 142;
+#elif V8_TARGET_ARCH_MIPS
+ static const int kBackEdgeDistanceUnit = 142;
+#else
+#error Unsupported target architecture.
+#endif
+
+
private:
class Breakable;
class Iteration;
@@ -236,7 +260,7 @@ class FullCodeGenerator: public AstVisitor {
// The finally block of a try/finally statement.
class Finally : public NestedStatement {
public:
- static const int kElementCount = 2;
+ static const int kElementCount = 5;
explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
@@ -393,11 +417,12 @@ class FullCodeGenerator: public AstVisitor {
// Bailout support.
void PrepareForBailout(Expression* node, State state);
- void PrepareForBailoutForId(unsigned id, State state);
+ void PrepareForBailoutForId(BailoutId id, State state);
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
+ void RecordTypeFeedbackCell(TypeFeedbackId id,
+ Handle<JSGlobalPropertyCell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -424,7 +449,7 @@ class FullCodeGenerator: public AstVisitor {
// of code inside the loop.
void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
// Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(unsigned osr_ast_id);
+ void RecordStackCheck(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -515,7 +540,7 @@ class FullCodeGenerator: public AstVisitor {
void CallIC(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -587,12 +612,12 @@ class FullCodeGenerator: public AstVisitor {
Handle<FixedArray> handler_table() { return handler_table_; }
struct BailoutEntry {
- unsigned id;
+ BailoutId id;
unsigned pc_and_state;
};
struct TypeFeedbackCellEntry {
- unsigned ast_id;
+ TypeFeedbackId ast_id;
Handle<JSGlobalPropertyCell> cell;
};
@@ -787,6 +812,8 @@ class FullCodeGenerator: public AstVisitor {
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
+ bool generate_debug_code_;
+ Zone* zone_;
friend class NestedStatement;
@@ -797,16 +824,16 @@ class FullCodeGenerator: public AstVisitor {
// A map from property names to getter/setter pairs allocated in the zone.
class AccessorTable: public TemplateHashMap<Literal,
ObjectLiteral::Accessors,
- ZoneListAllocationPolicy> {
+ ZoneAllocationPolicy> {
public:
explicit AccessorTable(Zone* zone) :
- TemplateHashMap<Literal,
- ObjectLiteral::Accessors,
- ZoneListAllocationPolicy>(Literal::Match),
+ TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy>(Literal::Match,
+ ZoneAllocationPolicy(zone)),
zone_(zone) { }
Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true);
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
return it;
}
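A substitution running through this header is the replacement of raw `unsigned` AST ids with the BailoutId and TypeFeedbackId wrapper types (see BailoutEntry and TypeFeedbackCellEntry above), so the two id spaces can no longer be mixed silently. A minimal sketch of the wrapper shape, assuming only the members this diff actually exercises (explicit construction, ToInt(), None()); the real classes are defined elsewhere in V8 and may differ in detail:

    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}     // no implicit conversion
      int ToInt() const { return id_; }           // as used by EmitStackCheckTable
      static BailoutId None() { return BailoutId(kNoneId); }
      bool IsNone() const { return id_ == kNoneId; }
      bool operator==(const BailoutId& other) const { return id_ == other.id_; }

     private:
      static const int kNoneId = -1;              // sentinel value assumed
      int id_;
    };

    // A plain unsigned can no longer be passed where a BailoutId is expected:
    //   PrepareForBailoutForId(42, state);             // no longer compiles
    //   PrepareForBailoutForId(BailoutId(42), state);  // intent is explicit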
diff --git a/src/3rdparty/v8/src/func-name-inferrer.cc b/src/3rdparty/v8/src/func-name-inferrer.cc
index 239358d..2dd0bbc 100644
--- a/src/3rdparty/v8/src/func-name-inferrer.cc
+++ b/src/3rdparty/v8/src/func-name-inferrer.cc
@@ -34,11 +34,12 @@
namespace v8 {
namespace internal {
-FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
: isolate_(isolate),
- entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4) {
+ entries_stack_(10, zone),
+ names_stack_(5, zone),
+ funcs_to_infer_(4, zone),
+ zone_(zone) {
}
@@ -48,21 +49,21 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
// and starts with a capital letter.
if (name->length() > 0 && Runtime::IsUpperCaseChar(
isolate()->runtime_state(), name->Get(0))) {
- names_stack_.Add(Name(name, kEnclosingConstructorName));
+ names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
- names_stack_.Add(Name(name, kLiteralName));
+ names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
- names_stack_.Add(Name(name, kVariableName));
+ names_stack_.Add(Name(name, kVariableName), zone());
}
}
diff --git a/src/3rdparty/v8/src/func-name-inferrer.h b/src/3rdparty/v8/src/func-name-inferrer.h
index 1a57268..f57e778 100644
--- a/src/3rdparty/v8/src/func-name-inferrer.h
+++ b/src/3rdparty/v8/src/func-name-inferrer.h
@@ -45,7 +45,7 @@ class Isolate;
// a name.
class FuncNameInferrer : public ZoneObject {
public:
- explicit FuncNameInferrer(Isolate* isolate);
+ FuncNameInferrer(Isolate* isolate, Zone* zone);
// Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -55,7 +55,7 @@ class FuncNameInferrer : public ZoneObject {
// Enters name collection state.
void Enter() {
- entries_stack_.Add(names_stack_.length());
+ entries_stack_.Add(names_stack_.length(), zone());
}
// Pushes an encountered name onto names stack when in collection state.
@@ -66,7 +66,7 @@ class FuncNameInferrer : public ZoneObject {
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
- funcs_to_infer_.Add(func_to_infer);
+ funcs_to_infer_.Add(func_to_infer, zone());
}
}
@@ -88,6 +88,8 @@ class FuncNameInferrer : public ZoneObject {
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
+ if (entries_stack_.is_empty())
+ funcs_to_infer_.Clear();
}
private:
@@ -103,6 +105,7 @@ class FuncNameInferrer : public ZoneObject {
};
Isolate* isolate() { return isolate_; }
+ Zone* zone() const { return zone_; }
// Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
@@ -117,6 +120,7 @@ class FuncNameInferrer : public ZoneObject {
ZoneList<int> entries_stack_;
ZoneList<Name> names_stack_;
ZoneList<FunctionLiteral*> funcs_to_infer_;
+ Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
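The mechanical change in these two files (and in frames.cc and full-codegen.h above) is that zone-allocated containers now take their Zone explicitly on every allocating call, so owners such as FuncNameInferrer carry a Zone* member. A stand-in sketch of the pattern in plain C++, not V8's actual ZoneList:

    #include <cstddef>
    #include <cstdlib>

    // Stand-in arena; a real zone owns bump-pointer segments and frees
    // everything en masse when the zone is deleted.
    struct Zone {};

    inline void* AllocateInZone(Zone*, std::size_t size) {
      return std::malloc(size);  // stand-in for zone allocation
    }

    // Naming the Zone at every allocating call makes the allocation
    // context visible at the call site instead of relying on a
    // per-isolate default, and allows several independent zones.
    template <typename T>
    class ArenaList {
     public:
      ArenaList(int capacity, Zone* zone)
          : data_(NULL), length_(0), capacity_(0) {
        Grow(capacity, zone);
      }
      void Add(const T& value, Zone* zone) {  // allocating ops take the zone
        if (length_ == capacity_) Grow(capacity_ * 2 + 1, zone);
        data_[length_++] = value;
      }
      int length() const { return length_; }

     private:
      void Grow(int new_capacity, Zone* zone) {
        T* new_data = static_cast<T*>(
            AllocateInZone(zone, new_capacity * sizeof(T)));
        for (int i = 0; i < length_; i++) new_data[i] = data_[i];
        data_ = new_data;
        capacity_ = new_capacity;
      }
      T* data_;
      int length_;
      int capacity_;
    };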
diff --git a/src/3rdparty/v8/src/gdb-jit.cc b/src/3rdparty/v8/src/gdb-jit.cc
index d3cd447..dde6bbd 100644
--- a/src/3rdparty/v8/src/gdb-jit.cc
+++ b/src/3rdparty/v8/src/gdb-jit.cc
@@ -31,11 +31,13 @@
#include "bootstrapper.h"
#include "compiler.h"
+#include "frames.h"
+#include "frames-inl.h"
#include "global-handles.h"
#include "messages.h"
-#include "platform.h"
#include "natives.h"
-#include "scopeinfo.h"
+#include "platform.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
@@ -194,7 +196,7 @@ class DebugSectionBase : public ZoneObject {
virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
uintptr_t start = writer->position();
- if (WriteBody(writer)) {
+ if (WriteBodyInternal(writer)) {
uintptr_t end = writer->position();
header->offset = start;
#if defined(__MACH_O)
@@ -204,7 +206,7 @@ class DebugSectionBase : public ZoneObject {
}
}
- virtual bool WriteBody(Writer* writer) {
+ virtual bool WriteBodyInternal(Writer* writer) {
return false;
}
@@ -340,14 +342,14 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
uintptr_t start = w->position();
- if (WriteBody(w)) {
+ if (WriteBodyInternal(w)) {
uintptr_t end = w->position();
header->offset = start;
header->size = end - start;
}
}
- virtual bool WriteBody(Writer* w) {
+ virtual bool WriteBodyInternal(Writer* w) {
return false;
}
@@ -627,9 +629,9 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
- ELF() : sections_(6) {
- sections_.Add(new ELFSection("", ELFSection::TYPE_NULL, 0));
- sections_.Add(new StringTable(".shstrtab"));
+ ELF(Zone* zone) : sections_(6, zone) {
+ sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
+ sections_.Add(new(zone) StringTable(".shstrtab"), zone);
}
void Write(Writer* w) {
@@ -642,8 +644,8 @@ class ELF BASE_EMBEDDED {
return sections_[index];
}
- uint32_t AddSection(ELFSection* section) {
- sections_.Add(section);
+ uint32_t AddSection(ELFSection* section, Zone* zone) {
+ sections_.Add(section, zone);
section->set_index(sections_.length() - 1);
return sections_.length() - 1;
}
@@ -675,7 +677,7 @@ class ELF BASE_EMBEDDED {
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif defined(V8_TARGET_ARCH_X64)
const uint8_t ident[16] =
- { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0 , 0, 0, 0, 0, 0, 0};
+ { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
#endif
@@ -852,10 +854,10 @@ class ELFSymbol BASE_EMBEDDED {
class ELFSymbolTable : public ELFSection {
public:
- explicit ELFSymbolTable(const char* name)
+ ELFSymbolTable(const char* name, Zone* zone)
: ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
- locals_(1),
- globals_(1) {
+ locals_(1, zone),
+ globals_(1, zone) {
}
virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
@@ -883,11 +885,11 @@ class ELFSymbolTable : public ELFSection {
strtab->DetachWriter();
}
- void Add(const ELFSymbol& symbol) {
+ void Add(const ELFSymbol& symbol, Zone* zone) {
if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
- locals_.Add(symbol);
+ locals_.Add(symbol, zone);
} else {
- globals_.Add(symbol);
+ globals_.Add(symbol, zone);
}
}
@@ -1019,26 +1021,29 @@ class CodeDescription BASE_EMBEDDED {
static void CreateSymbolsTable(CodeDescription* desc,
ELF* elf,
int text_section_index) {
- ELFSymbolTable* symtab = new ELFSymbolTable(".symtab");
- StringTable* strtab = new StringTable(".strtab");
+ Zone* zone = desc->info()->zone();
+ ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
+ StringTable* strtab = new(zone) StringTable(".strtab");
// Symbol table should be followed by the linked string table.
- elf->AddSection(symtab);
- elf->AddSection(strtab);
+ elf->AddSection(symtab, zone);
+ elf->AddSection(strtab, zone);
symtab->Add(ELFSymbol("V8 Code",
0,
0,
ELFSymbol::BIND_LOCAL,
ELFSymbol::TYPE_FILE,
- ELFSection::INDEX_ABSOLUTE));
+ ELFSection::INDEX_ABSOLUTE),
+ zone);
symtab->Add(ELFSymbol(desc->name(),
0,
desc->CodeSize(),
ELFSymbol::BIND_GLOBAL,
ELFSymbol::TYPE_FUNC,
- text_section_index));
+ text_section_index),
+ zone);
}
#endif // defined(__ELF)
@@ -1074,7 +1079,7 @@ class DebugInfoSection : public DebugSection {
DW_ATE_SIGNED = 0x5
};
- bool WriteBody(Writer* w) {
+ bool WriteBodyInternal(Writer* w) {
uintptr_t cu_start = w->position();
Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
@@ -1094,8 +1099,7 @@ class DebugInfoSection : public DebugSection {
w->WriteString("v8value");
if (desc_->IsInfoAvailable()) {
- CompilationInfo* info = desc_->info();
- ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+ Scope* scope = desc_->info()->scope();
w->WriteULEB128(2);
w->WriteString(desc_->name());
w->Write<intptr_t>(desc_->CodeStart());
@@ -1106,23 +1110,27 @@ class DebugInfoSection : public DebugSection {
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif defined(V8_TARGET_ARCH_X64)
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
+#elif defined(V8_TARGET_ARCH_ARM)
+ UNIMPLEMENTED();
+#elif defined(V8_TARGET_ARCH_MIPS)
+ UNIMPLEMENTED();
#else
#error Unsupported target architecture.
#endif
fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
- int params = scope_info.number_of_parameters();
- int slots = scope_info.number_of_stack_slots();
- int context_slots = scope_info.number_of_context_slots();
+ int params = scope->num_parameters();
+ int slots = scope->num_stack_slots();
+ int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope_info.LocalCount();
+ int locals = scope->StackLocalCount();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
+ *scope->parameter(param)->name()->ToCString(DISALLOW_NULLS));
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1148,7 +1156,7 @@ class DebugInfoSection : public DebugSection {
ASSERT(Context::CLOSURE_INDEX == 0);
ASSERT(Context::PREVIOUS_INDEX == 1);
ASSERT(Context::EXTENSION_INDEX == 2);
- ASSERT(Context::GLOBAL_INDEX == 3);
+ ASSERT(Context::GLOBAL_OBJECT_INDEX == 3);
w->WriteULEB128(current_abbreviation++);
w->WriteString(".closure");
w->WriteULEB128(current_abbreviation++);
@@ -1167,10 +1175,13 @@ class DebugInfoSection : public DebugSection {
w->WriteString(builder.Finalize());
}
+ ZoneList<Variable*> stack_locals(locals, scope->zone());
+ ZoneList<Variable*> context_locals(context_slots, scope->zone());
+ scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
for (int local = 0; local < locals; ++local) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- *scope_info.LocalName(local)->ToCString(DISALLOW_NULLS));
+ *stack_locals[local]->name()->ToCString(DISALLOW_NULLS));
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1287,7 +1298,7 @@ class DebugAbbrevSection : public DebugSection {
w->WriteULEB128(0);
}
- bool WriteBody(Writer* w) {
+ bool WriteBodyInternal(Writer* w) {
int current_abbreviation = 1;
bool extra_info = desc_->IsInfoAvailable();
ASSERT(desc_->IsLineInfoAvailable());
@@ -1306,14 +1317,13 @@ class DebugAbbrevSection : public DebugSection {
w->WriteULEB128(0);
if (extra_info) {
- CompilationInfo* info = desc_->info();
- ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
- int params = scope_info.number_of_parameters();
- int slots = scope_info.number_of_stack_slots();
- int context_slots = scope_info.number_of_context_slots();
+ Scope* scope = desc_->info()->scope();
+ int params = scope->num_parameters();
+ int slots = scope->num_stack_slots();
+ int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
- int locals = scope_info.LocalCount();
+ int locals = scope->StackLocalCount();
int total_children =
params + slots + context_slots + internal_slots + locals + 2;
@@ -1418,7 +1428,7 @@ class DebugLineSection : public DebugSection {
DW_LNE_DEFINE_FILE = 3
};
- bool WriteBody(Writer* w) {
+ bool WriteBodyInternal(Writer* w) {
// Write prologue.
Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
@@ -1558,7 +1568,7 @@ class DebugLineSection : public DebugSection {
class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription* desc);
- virtual bool WriteBody(Writer* w);
+ virtual bool WriteBodyInternal(Writer* w);
int WriteCIE(Writer* w);
void WriteFDE(Writer* w, int);
@@ -1770,7 +1780,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
}
-bool UnwindInfoSection::WriteBody(Writer* w) {
+bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
uint32_t cie_position = WriteCIE(w);
WriteFDE(w, cie_position);
return true;
@@ -1780,13 +1790,14 @@ bool UnwindInfoSection::WriteBody(Writer* w) {
#endif // V8_TARGET_ARCH_X64
static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
+ Zone* zone = desc->info()->zone();
if (desc->IsLineInfoAvailable()) {
- obj->AddSection(new DebugInfoSection(desc));
- obj->AddSection(new DebugAbbrevSection(desc));
- obj->AddSection(new DebugLineSection(desc));
+ obj->AddSection(new(zone) DebugInfoSection(desc), zone);
+ obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
+ obj->AddSection(new(zone) DebugLineSection(desc), zone);
}
#ifdef V8_TARGET_ARCH_X64
- obj->AddSection(new UnwindInfoSection(desc));
+ obj->AddSection(new(zone) UnwindInfoSection(desc), zone);
#endif
}
@@ -1905,7 +1916,8 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+ Zone* zone = desc->info()->zone();
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
#ifdef __MACH_O
MachO mach_o;
Writer w(&mach_o);
@@ -1918,17 +1930,19 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- ELF elf;
+ ELF elf(zone);
Writer w(&elf);
int text_section_index = elf.AddSection(
- new FullHeaderELFSection(".text",
- ELFSection::TYPE_NOBITS,
- kCodeAlignment,
- desc->CodeStart(),
- 0,
- desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
+ new(zone) FullHeaderELFSection(
+ ".text",
+ ELFSection::TYPE_NOBITS,
+ kCodeAlignment,
+ desc->CodeStart(),
+ 0,
+ desc->CodeSize(),
+ ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
+ zone);
CreateSymbolsTable(desc, &elf, text_section_index);
diff --git a/src/3rdparty/v8/src/global-handles.cc b/src/3rdparty/v8/src/global-handles.cc
index 9c0ad45..0006f8e 100644
--- a/src/3rdparty/v8/src/global-handles.cc
+++ b/src/3rdparty/v8/src/global-handles.cc
@@ -69,6 +69,7 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
independent_ = false;
+ partially_dependent_ = false;
in_new_space_list_ = false;
parameter_or_next_free_.next_free = NULL;
callback_ = NULL;
@@ -89,6 +90,7 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
independent_ = false;
+ partially_dependent_ = false;
state_ = NORMAL;
parameter_or_next_free_.parameter = NULL;
callback_ = NULL;
@@ -154,6 +156,15 @@ class GlobalHandles::Node {
}
bool is_independent() const { return independent_; }
+ void MarkPartiallyDependent(GlobalHandles* global_handles) {
+ ASSERT(state_ != FREE);
+ if (global_handles->isolate()->heap()->InNewSpace(object_)) {
+ partially_dependent_ = true;
+ }
+ }
+ bool is_partially_dependent() const { return partially_dependent_; }
+ void clear_partially_dependent() { partially_dependent_ = false; }
+
// In-new-space-list flag accessors.
void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
bool is_in_new_space_list() const { return in_new_space_list_; }
@@ -260,6 +271,7 @@ class GlobalHandles::Node {
State state_ : 4;
bool independent_ : 1;
+ bool partially_dependent_ : 1;
bool in_new_space_list_ : 1;
// Handle specific callback.
@@ -448,6 +460,16 @@ void GlobalHandles::MarkIndependent(Object** location) {
}
+void GlobalHandles::MarkPartiallyDependent(Object** location) {
+ Node::FromLocation(location)->MarkPartiallyDependent(this);
+}
+
+
+bool GlobalHandles::IsIndependent(Object** location) {
+ return Node::FromLocation(location)->is_independent();
+}
+
+
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -462,6 +484,9 @@ void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
Node::FromLocation(location)->set_wrapper_class_id(class_id);
}
+uint16_t GlobalHandles::GetWrapperClassId(Object** location) {
+ return Node::FromLocation(location)->wrapper_class_id();
+}
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
@@ -493,8 +518,9 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent())) {
- v->VisitPointer(node->location());
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ !node->is_partially_dependent())) {
+ v->VisitPointer(node->location());
}
}
}
@@ -505,8 +531,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeak() &&
- f(isolate_->heap(), node->location())) {
+ if ((node->is_independent() || node->is_partially_dependent()) &&
+ node->IsWeak() && f(isolate_->heap(), node->location())) {
node->MarkPending();
}
}
@@ -517,7 +543,8 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeakRetainer()) {
+ if ((node->is_independent() || node->is_partially_dependent()) &&
+ node->IsWeakRetainer()) {
v->VisitPointer(node->location());
}
}
@@ -539,7 +566,10 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
- if (!node->is_independent()) continue;
+ if (!node->is_independent() && !node->is_partially_dependent()) {
+ continue;
+ }
+ node->clear_partially_dependent();
if (node->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
@@ -555,6 +585,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
+ it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@@ -602,7 +633,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
+ if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
v->VisitEmbedderReference(it.node()->location(),
it.node()->wrapper_class_id());
}
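Taken together, the loops above give a new-space handle three levels of participation in a scavenge: strong and plain weak handles are treated as roots (their callbacks must wait for a full GC), while independent and the new partially dependent handles may be reclaimed early. A rough model of that decision, with names invented for illustration:

```cpp
enum HandleKind {
  STRONG,
  WEAK_DEPENDENT,           // Weak, neither independent nor partially dep.
  WEAK_INDEPENDENT,
  WEAK_PARTIALLY_DEPENDENT  // The new state; set only while in new space.
};

// Mirrors IterateNewSpaceStrongAndDependentRoots: anything returning true
// here keeps its target alive across a scavenge.
bool TreatAsRootDuringScavenge(HandleKind kind) {
  switch (kind) {
    case STRONG:
    case WEAK_DEPENDENT:
      return true;
    case WEAK_INDEPENDENT:
    case WEAK_PARTIALLY_DEPENDENT:
      return false;  // May be collected (and its callback run) by a scavenge.
  }
  return true;
}
```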
diff --git a/src/3rdparty/v8/src/global-handles.h b/src/3rdparty/v8/src/global-handles.h
index ddf5fe2..482baef 100644
--- a/src/3rdparty/v8/src/global-handles.h
+++ b/src/3rdparty/v8/src/global-handles.h
@@ -131,6 +131,7 @@ class GlobalHandles {
WeakReferenceCallback callback);
static void SetWrapperClassId(Object** location, uint16_t class_id);
+ static uint16_t GetWrapperClassId(Object** location);
// Returns the current number of weak handles.
int NumberOfWeakHandles() { return number_of_weak_handles_; }
@@ -154,6 +155,11 @@ class GlobalHandles {
// Clear the weakness of a global handle.
void MarkIndependent(Object** location);
+  // Mark the reference to this object as externally unreachable.
+ void MarkPartiallyDependent(Object** location);
+
+ static bool IsIndependent(Object** location);
+
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
@@ -192,16 +198,17 @@ class GlobalHandles {
// Iterates over strong and dependent handles. See the note above.
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
- // Finds weak independent handles satisfying the callback predicate
- // and marks them as pending. See the note above.
+ // Finds weak independent or partially independent handles satisfying
+ // the callback predicate and marks them as pending. See the note above.
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
- // Iterates over weak independent handles. See the note above.
+ // Iterates over weak independent or partially independent handles.
+ // See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
// Add an object group.
// Should be only used in GC callback function before a collection.
- // All groups are destroyed after a mark-compact collection.
+ // All groups are destroyed after a garbage collection.
void AddObjectGroup(Object*** handles,
size_t length,
v8::RetainedObjectInfo* info);
diff --git a/src/3rdparty/v8/src/globals.h b/src/3rdparty/v8/src/globals.h
index 54d628e..74c12f8 100644
--- a/src/3rdparty/v8/src/globals.h
+++ b/src/3rdparty/v8/src/globals.h
@@ -74,7 +74,7 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(__ARMEL__)
+#elif defined(__ARMEL__) || defined(_M_ARM)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
// Some CPU-OS combinations allow unaligned access on ARM. We assume
@@ -128,7 +128,7 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
-#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM))
+#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM) && !defined(_WIN32_WCE))
#define USE_SIMULATOR 1
#endif
#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
@@ -136,21 +136,6 @@ namespace internal {
#endif
#endif
-// Define unaligned read for the target architectures supporting it.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#elif V8_TARGET_ARCH_ARM
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-#elif V8_TARGET_ARCH_MIPS
-#else
-#error Target architecture is not supported by v8
-#endif
-
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -203,6 +188,7 @@ typedef byte* Address;
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
+#define V8PRIuPTR V8_PTR_PREFIX "u"
// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
@@ -360,6 +346,20 @@ F FUNCTION_CAST(Address addr) {
#define MUST_USE_RESULT
#endif
+
+// Define DISABLE_ASAN macros.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#endif
+#endif
+
+
+#ifndef DISABLE_ASAN
+#define DISABLE_ASAN
+#endif
+
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
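For context, a hedged usage sketch of the new DISABLE_ASAN macro: it expands to the no_address_safety_analysis attribute only under an ASAN build and to nothing otherwise, so it can annotate functions that do deliberately raw memory reads. The reader function below is hypothetical, not from the diff:

```cpp
#include <cstdint>
#include <cstring>

#ifndef DISABLE_ASAN
#define DISABLE_ASAN  // Empty fallback, exactly as in the hunk above.
#endif

// Hypothetical example: low-level readers are the kind of code that wants
// AddressSanitizer instrumentation suppressed.
DISABLE_ASAN uint32_t ReadWordAt(const char* p) {
  uint32_t result;
  std::memcpy(&result, p, sizeof(result));
  return result;
}
```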
diff --git a/src/3rdparty/v8/src/handles-inl.h b/src/3rdparty/v8/src/handles-inl.h
index a5c81ce..1307986 100644
--- a/src/3rdparty/v8/src/handles-inl.h
+++ b/src/3rdparty/v8/src/handles-inl.h
@@ -149,25 +149,31 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
+ Isolate* isolate = Isolate::Current();
v8::ImplementationUtilities::HandleScopeData* current =
- Isolate::Current()->handle_scope_data();
+ isolate->handle_scope_data();
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- current->limit = current->next;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ current->limit = current->next;
- level_ = current->level;
- current->level = 0;
+ level_ = current->level;
+ current->level = 0;
+ }
}
inline NoHandleAllocation::~NoHandleAllocation() {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
- ASSERT_EQ(0, data->level);
- data->level = level_;
+ if (active_) {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* data =
+ Isolate::Current()->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
+ }
}
#endif
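The rewritten NoHandleAllocation turns a debug-only RAII guard into a conditional one: on the concurrent optimizer thread it must be a no-op, because that thread cannot touch the isolate's handle-scope data. A self-contained sketch of the shape, with the thread check and the seal/unseal steps stubbed out:

```cpp
class NoHandleAllocationSketch {
 public:
  NoHandleAllocationSketch() : active_(!OnOptimizerThread()) {
    if (active_) Seal();    // Shrink the scope limit so allocations assert.
  }
  ~NoHandleAllocationSketch() {
    if (active_) Unseal();  // Restore the saved level, as in the diff.
  }

 private:
  static bool OnOptimizerThread() { return false; }  // Stub for illustration.
  void Seal() {}
  void Unseal() {}
  bool active_;
};
```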
diff --git a/src/3rdparty/v8/src/handles.cc b/src/3rdparty/v8/src/handles.cc
index def1604..a6192d8 100644
--- a/src/3rdparty/v8/src/handles.cc
+++ b/src/3rdparty/v8/src/handles.cc
@@ -165,7 +165,7 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMapDropTransitions(
+ func->GetIsolate()->factory()->CopyMap(
Handle<Map>(func->initial_map()));
new_initial_map->set_unused_property_fields(nof);
func->set_initial_map(*new_initial_map);
@@ -561,6 +561,9 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
result = enum_fun(info);
}
}
+#if ENABLE_EXTRA_CHECKS
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+#endif
return result;
}
@@ -581,12 +584,34 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
// Leaving JavaScript.
VMState state(isolate, EXTERNAL);
result = enum_fun(info);
+#if ENABLE_EXTRA_CHECKS
+ CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
+#endif
}
}
return result;
}
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
+ Isolate* isolate = script->GetIsolate();
+ Handle<String> name_or_source_url_key =
+ isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
+ Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> property = GetProperty(script_wrapper,
+ name_or_source_url_key);
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+ NULL, &caught_exception);
+ if (caught_exception) {
+ result = isolate->factory()->undefined_value();
+ }
+ return result;
+}
+
+
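GetScriptNameOrSourceURL above follows a call-or-default pattern: invoke the JS-level nameOrSourceURL method via Execution::TryCall and, if it threw, fall back to undefined rather than propagating the exception. The generic shape of that pattern, with placeholder names (not V8 API):

```cpp
// Run a callable that reports failure through an out-parameter, substituting
// a default instead of propagating the error.
template <typename F, typename T>
T CallOrDefault(F call, T default_value) {
  bool caught_exception = false;
  T result = call(&caught_exception);
  return caught_exception ? default_value : result;
}

// CallOrDefault([](bool* caught) { *caught = false; return 42; }, 0) == 42;
// had the callable set *caught, the result would be 0 instead.
```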
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
@@ -604,7 +629,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->global_context()->arguments_boilerplate(),
+ isolate->context()->native_context()->arguments_boilerplate(),
isolate);
Handle<JSFunction> arguments_function = Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()),
@@ -699,77 +724,134 @@ Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
}
+Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length) {
+ ASSERT(array->length() >= length);
+ if (array->length() == length) return array;
+
+ Handle<FixedArray> new_array =
+ array->GetIsolate()->factory()->NewFixedArray(length);
+ for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
+ return new_array;
+}
+
+
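ReduceFixedArrayTo is what lets one cached key array serve any map whose enum length is no larger than the cache. A toy equivalent, assuming std::vector in place of FixedArray:

```cpp
#include <cstddef>
#include <vector>

// Return the array unchanged when it already has the requested length;
// otherwise copy the first `length` elements into a fresh, smaller array.
std::vector<int> ReduceTo(const std::vector<int>& array, std::size_t length) {
  // Precondition mirrored from the ASSERT above: array.size() >= length.
  if (array.size() == length) return array;
  return std::vector<int>(array.begin(), array.begin() + length);
}
```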
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result) {
- int index = 0;
Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
- isolate->counters()->enum_cache_hits()->Increment();
+ int own_property_count = object->map()->EnumLength();
+ // If we have an enum cache, but the enum length of the given map is set
+ // to kInvalidEnumCache, this means that the map itself has never used the
+ // present enum cache. The first step to using the cache is to set the
+ // enum length of the map by counting the number of own descriptors that
+ // are not DONT_ENUM.
+ if (own_property_count == Map::kInvalidEnumCache) {
+ own_property_count = object->map()->NumberOfDescribedProperties(
+ OWN_DESCRIPTORS, DONT_ENUM);
+
+ if (cache_result) object->map()->SetEnumLength(own_property_count);
+ }
+
DescriptorArray* desc = object->map()->instance_descriptors();
- return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()),
- isolate);
+ Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
+
+      // If all the properties required in the enum are present in the cache,
+      // we can reuse it. Otherwise, the enum cache was generated for a
+      // previous (smaller) version of the Descriptor Array, and we
+      // regenerate the enum cache.
+ if (own_property_count <= keys->length()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ return ReduceFixedArrayTo(keys, own_property_count);
+ }
}
- isolate->counters()->enum_cache_misses()->Increment();
+
Handle<Map> map(object->map());
- int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
+ if (map->instance_descriptors()->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_result) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
- Handle<FixedArray> indices;
- Handle<FixedArray> sort_array2;
+ isolate->counters()->enum_cache_misses()->Increment();
+ int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_ENUM);
- if (cache_result) {
- indices = isolate->factory()->NewFixedArray(num_enum);
- sort_array2 = isolate->factory()->NewFixedArray(num_enum);
- }
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
+ Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+ int real_size = map->NumberOfOwnDescriptors();
+ int enum_size = 0;
+ int index = 0;
+
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (!details.IsDontEnum()) {
+ if (i < real_size) ++enum_size;
storage->set(index, descs->GetKey(i));
- PropertyDetails details = descs->GetDetails(i);
- sort_array->set(index, Smi::FromInt(details.index()));
if (!indices.is_null()) {
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
- sort_array2 = Handle<FixedArray>();
} else {
int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
indices->set(index, Smi::FromInt(field_index));
- sort_array2->set(index, Smi::FromInt(details.index()));
}
}
index++;
}
}
- storage->SortPairs(*sort_array, sort_array->length());
- if (!indices.is_null()) {
- indices->SortPairs(*sort_array2, sort_array2->length());
- }
+ ASSERT(index == storage->length());
+
+ Handle<FixedArray> bridge_storage =
+ isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ desc->SetEnumCache(*bridge_storage,
+ *storage,
+ indices.is_null() ? Object::cast(Smi::FromInt(0))
+ : Object::cast(*indices));
if (cache_result) {
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
+ object->map()->SetEnumLength(enum_size);
}
- ASSERT(storage->length() == index);
- return storage;
+
+ return ReduceFixedArrayTo(storage, enum_size);
} else {
- int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
- Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
- object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
+ Handle<StringDictionary> dictionary(object->property_dictionary());
+
+ int length = dictionary->NumberOfElements();
+ if (length == 0) {
+ return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+ }
+
+ // The enumeration array is generated by allocating an array big enough to
+  // hold all properties that have been seen, whether they are deleted or
+  // not. Subsequently, all visible properties are added to the array. If some
+ // properties were not visible, the array is trimmed so it only contains
+ // visible properties. This improves over adding elements and sorting by
+ // index by having linear complexity rather than n*log(n).
+
+  // By comparing the monotonically increasing NextEnumerationIndex to the
+  // NumberOfElements, we can predict the number of holes in the final array.
+  // If there will be
+ // more than 50% holes, regenerate the enumeration indices to reduce the
+ // number of holes to a minimum. This avoids allocating a large array if
+ // many properties were added but subsequently deleted.
+ int next_enumeration = dictionary->NextEnumerationIndex();
+ if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
+ StringDictionary::DoGenerateNewEnumerationIndices(dictionary);
+ next_enumeration = dictionary->NextEnumerationIndex();
+ }
+
+ Handle<FixedArray> storage =
+ isolate->factory()->NewFixedArray(next_enumeration);
+
+ storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
+ ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM));
return storage;
}
}
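A worked instance of the 50%-holes heuristic above, with hypothetical numbers: 10 live entries but enumeration indices up to 20 already handed out means a straight copy would need a 20-slot array that is half holes, so the indices are regenerated first.

```cpp
#include <cstdio>

int main() {
  int length = 10;            // dictionary->NumberOfElements()
  int next_enumeration = 20;  // dictionary->NextEnumerationIndex()
  bool regenerate = next_enumeration > (length * 3) / 2;  // 20 > 15
  std::printf("regenerate enumeration indices first: %s\n",
              regenerate ? "yes" : "no");  // Prints "yes".
  return 0;
}
```

After regeneration, NextEnumerationIndex shrinks back toward the element count, so the storage array allocated afterwards is close to minimal.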
@@ -958,4 +1040,47 @@ int Utf8Length(Handle<String> str) {
return len;
}
+
+DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
+ : impl_(isolate->handle_scope_implementer()) {
+ ASSERT(impl_->isolate() == Isolate::Current());
+ impl_->BeginDeferredScope();
+ v8::ImplementationUtilities::HandleScopeData* data =
+ impl_->isolate()->handle_scope_data();
+ Object** new_next = impl_->GetSpareOrNewBlock();
+ Object** new_limit = &new_next[kHandleBlockSize];
+ ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
+ impl_->blocks()->Add(new_next);
+
+#ifdef DEBUG
+ prev_level_ = data->level;
+#endif
+ data->level++;
+ prev_limit_ = data->limit;
+ prev_next_ = data->next;
+ data->next = new_next;
+ data->limit = new_limit;
+}
+
+
+DeferredHandleScope::~DeferredHandleScope() {
+ impl_->isolate()->handle_scope_data()->level--;
+ ASSERT(handles_detached_);
+ ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_);
+}
+
+
+DeferredHandles* DeferredHandleScope::Detach() {
+ DeferredHandles* deferred = impl_->Detach(prev_limit_);
+ v8::ImplementationUtilities::HandleScopeData* data =
+ impl_->isolate()->handle_scope_data();
+ data->next = prev_next_;
+ data->limit = prev_limit_;
+#ifdef DEBUG
+ handles_detached_ = true;
+#endif
+ return deferred;
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/handles.h b/src/3rdparty/v8/src/handles.h
index 960696b..b80dbe5 100644
--- a/src/3rdparty/v8/src/handles.h
+++ b/src/3rdparty/v8/src/handles.h
@@ -95,6 +95,17 @@ class Handle {
};
+// Convenience wrapper.
+template<class T>
+inline Handle<T> handle(T* t) {
+ return Handle<T>(t);
+}
+
+
+class DeferredHandles;
+class HandleScopeImplementer;
+
+
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
@@ -156,8 +167,37 @@ class HandleScope {
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
+ friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
+ friend class v8::internal::HandleScopeImplementer;
friend class v8::ImplementationUtilities;
+ friend class v8::internal::Isolate;
+};
+
+
+class DeferredHandles;
+
+
+class DeferredHandleScope {
+ public:
+ explicit DeferredHandleScope(Isolate* isolate);
+ // The DeferredHandles object returned stores the Handles created
+ // since the creation of this DeferredHandleScope. The Handles are
+ // alive as long as the DeferredHandles object is alive.
+ DeferredHandles* Detach();
+ ~DeferredHandleScope();
+
+ private:
+ Object** prev_limit_;
+ Object** prev_next_;
+ HandleScopeImplementer* impl_;
+
+#ifdef DEBUG
+ bool handles_detached_;
+ int prev_level_;
+#endif
+
+ friend class HandleScopeImplementer;
};
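The contract sketched by the class comment: handles created inside a DeferredHandleScope survive it only if Detach() hands them to a DeferredHandles object before destruction, and the debug destructor asserts exactly that. A self-contained analogue with invented types:

```cpp
#include <cassert>
#include <utility>
#include <vector>

class DeferredScopeAnalogue {
 public:
  void Add(int handle) { handles_.push_back(handle); }

  std::vector<int> Detach() {  // Transfers ownership out of the scope.
    detached_ = true;
    return std::move(handles_);
  }

  ~DeferredScopeAnalogue() { assert(detached_); }  // Mirrors handles_detached_.

 private:
  std::vector<int> handles_;
  bool detached_ = false;
};

int main() {
  DeferredScopeAnalogue scope;
  scope.Add(1);
  std::vector<int> kept = scope.Detach();  // Outlives the scope safely.
  return static_cast<int>(kept.size()) - 1;
}
```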
@@ -216,7 +256,7 @@ Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
// if none exists.
Handle<JSValue> GetScriptWrapper(Handle<Script> script);
-// Script line number computations.
+// Script line number computations. Note that the line number is zero-based.
void InitScriptLineEnds(Handle<Script> script);
// For string calculates an array of line end positions. If the string
// does not end with a new line character, this character may optionally be
@@ -227,6 +267,7 @@ int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
int GetScriptColumnNumber(Handle<Script> script, int code_position);
+Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
@@ -243,6 +284,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
KeyCollectionType type,
bool* threw);
Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
+Handle<FixedArray> ReduceFixedArrayTo(Handle<FixedArray> array, int length);
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result);
@@ -294,6 +336,7 @@ class NoHandleAllocation BASE_EMBEDDED {
inline ~NoHandleAllocation();
private:
int level_;
+ bool active_;
#endif
};
diff --git a/src/3rdparty/v8/src/hashmap.h b/src/3rdparty/v8/src/hashmap.h
index 91843b8..11f6ace 100644
--- a/src/3rdparty/v8/src/hashmap.h
+++ b/src/3rdparty/v8/src/hashmap.h
@@ -40,9 +40,16 @@ class TemplateHashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
+  // The default capacity. This is used by call sites that want to pass in
+  // a non-default AllocationPolicy but still use the default capacity
+  // specified by the implementation.
+ static const uint32_t kDefaultHashMapCapacity = 8;
+
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
+ TemplateHashMapImpl(MatchFun match,
+ uint32_t capacity = kDefaultHashMapCapacity,
+ AllocationPolicy allocator = AllocationPolicy());
~TemplateHashMapImpl();
@@ -52,7 +59,8 @@ class TemplateHashMapImpl {
struct Entry {
void* key;
void* value;
- uint32_t hash; // the full hash value for key
+ uint32_t hash; // The full hash value for key
+    int order;  // If you never remove entries, this is the insertion order.
};
// If an entry with matching key is found, Lookup()
@@ -60,7 +68,8 @@ class TemplateHashMapImpl {
// but insert is set, a new entry is inserted with
// corresponding key, key hash, and NULL value.
// Otherwise, NULL is returned.
- Entry* Lookup(void* key, uint32_t hash, bool insert);
+ Entry* Lookup(void* key, uint32_t hash, bool insert,
+ AllocationPolicy allocator = AllocationPolicy());
// Removes the entry with matching key.
// It returns the value of the deleted entry
@@ -97,29 +106,30 @@ class TemplateHashMapImpl {
Entry* map_end() const { return map_ + capacity_; }
Entry* Probe(void* key, uint32_t hash);
- void Initialize(uint32_t capacity);
- void Resize();
+ void Initialize(uint32_t capacity, AllocationPolicy allocator);
+ void Resize(AllocationPolicy allocator);
};
typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
-template<class P>
-TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
- uint32_t initial_capacity) {
+template<class AllocationPolicy>
+TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
+ MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
match_ = match;
- Initialize(initial_capacity);
+ Initialize(initial_capacity, allocator);
}
-template<class P>
-TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
- P::Delete(map_);
+template<class AllocationPolicy>
+TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
+ AllocationPolicy::Delete(map_);
}
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
- void* key, uint32_t hash, bool insert) {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+TemplateHashMapImpl<AllocationPolicy>::Lookup(
+ void* key, uint32_t hash, bool insert, AllocationPolicy allocator) {
// Find a matching entry.
Entry* p = Probe(key, hash);
if (p->key != NULL) {
@@ -131,11 +141,12 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
p->key = key;
p->value = NULL;
p->hash = hash;
+ p->order = occupancy_;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
if (occupancy_ + occupancy_/4 >= capacity_) {
- Resize();
+ Resize(allocator);
p = Probe(key, hash);
}
@@ -147,8 +158,8 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
}
-template<class P>
-void* TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
+template<class AllocationPolicy>
+void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
@@ -209,8 +220,8 @@ void* TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
}
-template<class P>
-void TemplateHashMapImpl<P>::Clear() {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
@@ -220,15 +231,16 @@ void TemplateHashMapImpl<P>::Clear() {
}
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+ TemplateHashMapImpl<AllocationPolicy>::Start() const {
return Next(map_ - 1);
}
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
- const {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+ TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@@ -240,9 +252,9 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
}
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
- uint32_t hash) {
+template<class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+ TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
ASSERT(key != NULL);
ASSERT(IsPowerOf2(capacity_));
@@ -262,10 +274,11 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
}
-template<class P>
-void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Initialize(
+ uint32_t capacity, AllocationPolicy allocator) {
ASSERT(IsPowerOf2(capacity));
- map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
+ map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
if (map_ == NULL) {
v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
return;
@@ -275,24 +288,26 @@ void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
}
-template<class P>
-void TemplateHashMapImpl<P>::Resize() {
+template<class AllocationPolicy>
+void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
Entry* map = map_;
uint32_t n = occupancy_;
// Allocate larger map.
- Initialize(capacity_ * 2);
+ Initialize(capacity_ * 2, allocator);
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
- Lookup(p->key, p->hash, true)->value = p->value;
+ Entry* entry = Lookup(p->key, p->hash, true, allocator);
+ entry->value = p->value;
+ entry->order = p->order;
n--;
}
}
// Delete old map.
- P::Delete(map);
+ AllocationPolicy::Delete(map);
}
@@ -329,13 +344,18 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
};
TemplateHashMap(
- typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
- : TemplateHashMapImpl<AllocationPolicy>(match) { }
+ typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
+ AllocationPolicy allocator = AllocationPolicy())
+ : TemplateHashMapImpl<AllocationPolicy>(
+ match,
+ TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
+ allocator) { }
Iterator begin() const { return Iterator(this, this->Start()); }
Iterator end() const { return Iterator(this, NULL); }
- Iterator find(Key* key, bool insert = false) {
- return Iterator(this, this->Lookup(key, key->Hash(), insert));
+ Iterator find(Key* key, bool insert = false,
+ AllocationPolicy allocator = AllocationPolicy()) {
+ return Iterator(this, this->Lookup(key, key->Hash(), insert, allocator));
}
};
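The hashmap change follows the same convention as ZoneList earlier in this diff: the AllocationPolicy is passed by value to every operation that may allocate, so one map type can be backed by malloc/free or by a zone without storing the policy in each instance. A toy mirror of that calling convention (illustrative, not the V8 classes):

```cpp
#include <cstddef>
#include <cstdlib>

struct FreeStoreAllocationPolicy {
  void* New(std::size_t size) { return std::malloc(size); }
  static void Delete(void* p) { std::free(p); }
};

template <class AllocationPolicy>
struct TinyArray {
  int* data = nullptr;

  // Default-constructed policy, matching Lookup's default argument above.
  void Init(std::size_t n, AllocationPolicy allocator = AllocationPolicy()) {
    data = static_cast<int*>(allocator.New(n * sizeof(int)));
  }

  ~TinyArray() { AllocationPolicy::Delete(data); }
};

int main() {
  TinyArray<FreeStoreAllocationPolicy> a;
  a.Init(8);
  return 0;
}
```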
diff --git a/src/3rdparty/v8/src/heap-inl.h b/src/3rdparty/v8/src/heap-inl.h
index aa933b6..cb274cb 100644
--- a/src/3rdparty/v8/src/heap-inl.h
+++ b/src/3rdparty/v8/src/heap-inl.h
@@ -85,13 +85,16 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
- if (String::IsAscii(str.start(), str.length())) {
+ const char* start = str.start();
+ int length = str.length();
+ int non_ascii_start = String::NonAsciiStart(start, length);
+ if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
return AllocateStringFromAscii(str, pretenure);
}
// Non-ASCII and we need to decode.
- return AllocateStringFromUtf8Slow(str, pretenure);
+ return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
}
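The fast path above relies on the NonAsciiStart contract inferred from its use: return the index of the first byte >= 0x80, or a value >= length when the buffer is pure ASCII (in which case UTF-8 is bytewise identical to ASCII); otherwise the index doubles as the resume point handed to the slow decoder. A sketch of that assumed contract:

```cpp
#include <cstddef>

static std::size_t NonAsciiStart(const char* chars, std::size_t length) {
  for (std::size_t i = 0; i < length; i++) {
    if (static_cast<unsigned char>(chars[i]) >= 0x80) return i;
  }
  return length;  // >= length signals "all ASCII" to the caller.
}
```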
@@ -283,13 +286,6 @@ MaybeObject* Heap::AllocateRawMap() {
#endif
MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
-#ifdef DEBUG
- if (!result->IsFailure()) {
- // Maps have their own alignment.
- CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
- static_cast<intptr_t>(kHeapObjectTag));
- }
-#endif
return result;
}
@@ -484,10 +480,12 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
+ } else {
+ // Give up and reset the counters in case of an overflow.
+ amount_of_external_allocated_memory_ = 0;
+ amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
- intptr_t amount_since_last_global_gc =
- amount_of_external_allocated_memory_ -
- amount_of_external_allocated_memory_at_last_global_gc_;
+ intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -495,8 +493,19 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
+ } else {
+    // Give up and reset the counters in case of an underflow.
+ amount_of_external_allocated_memory_ = 0;
+ amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
}
+ if (FLAG_trace_external_memory) {
+ PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
+ PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
+ " amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
+ change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
+ reinterpret_cast<intptr_t>(isolate()));
+ }
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
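A reduced model of the new guards (sketch; the outer branch on the sign of change_in_bytes is elided by the hunk boundaries): when the signed addition wraps, both counters reset to zero instead of leaving the running total in a nonsense state that could trigger spurious collections.

```cpp
#include <cstdint>

struct ExternalMemoryCounters {
  intptr_t total = 0;
  intptr_t at_last_gc = 0;

  void Grow(intptr_t amount_after_add) {
    if (amount_after_add > total) {
      total = amount_after_add;  // Normal growth.
    } else {
      total = 0;                 // Overflow: give up and reset, as above.
      at_last_gc = 0;
    }
  }

  void Shrink(intptr_t amount_after_add) {
    if (amount_after_add >= 0) {
      total = amount_after_add;  // Normal shrink.
    } else {
      total = 0;                 // Underflow: reset both counters.
      at_last_gc = 0;
    }
  }
};
```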
@@ -621,12 +630,24 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
- ASSERT(heap_->InNewSpace(new_space_strings_[i]));
- ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+ Object* obj = Object::cast(new_space_strings_[i]);
+ // TODO(yangguo): check that the object is indeed an external string.
+ ASSERT(heap_->InNewSpace(obj));
+ ASSERT(obj != HEAP->the_hole_value());
+ if (obj->IsExternalAsciiString()) {
+ ExternalAsciiString* string = ExternalAsciiString::cast(obj);
+ ASSERT(String::IsAscii(string->GetChars(), string->length()));
+ }
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
- ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
- ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+ Object* obj = Object::cast(old_space_strings_[i]);
+ // TODO(yangguo): check that the object is indeed an external string.
+ ASSERT(!heap_->InNewSpace(obj));
+ ASSERT(obj != HEAP->the_hole_value());
+ if (obj->IsExternalAsciiString()) {
+ ExternalAsciiString* string = ExternalAsciiString::cast(obj);
+ ASSERT(String::IsAscii(string->GetChars(), string->length()));
+ }
}
#endif
}
@@ -641,9 +662,11 @@ void ExternalStringTable::AddOldObject(HeapObject* object) {
void ExternalStringTable::ShrinkNewObjects(int position) {
new_space_strings_.Rewind(position);
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
@@ -742,28 +765,15 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
}
-LinearAllocationScope::LinearAllocationScope() {
- HEAP->linear_allocation_scope_depth_++;
-}
-
-
-LinearAllocationScope::~LinearAllocationScope() {
- HEAP->linear_allocation_scope_depth_--;
- ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-}
-
-
-#ifdef DEBUG
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->Contains(object));
- ASSERT(object->map()->IsMap());
+ CHECK(HEAP->Contains(object));
+ CHECK(object->map()->IsMap());
}
}
}
-#endif
double GCTracer::SizeOfHeapObjects() {
@@ -771,37 +781,47 @@ double GCTracer::SizeOfHeapObjects() {
}
-#ifdef DEBUG
DisallowAllocationFailure::DisallowAllocationFailure() {
+#ifdef DEBUG
old_state_ = HEAP->disallow_allocation_failure_;
HEAP->disallow_allocation_failure_ = true;
+#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
+#ifdef DEBUG
HEAP->disallow_allocation_failure_ = old_state_;
-}
#endif
+}
#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(false);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(false);
+ }
}
AssertNoAllocation::~AssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation() {
- old_state_ = HEAP->allow_allocation(true);
+ Isolate* isolate = ISOLATE;
+ active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active_) {
+ old_state_ = isolate->heap()->allow_allocation(true);
+ }
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- HEAP->allow_allocation(old_state_);
+ if (active_) HEAP->allow_allocation(old_state_);
}
#else
diff --git a/src/3rdparty/v8/src/heap-profiler.cc b/src/3rdparty/v8/src/heap-profiler.cc
index 2e971a5..301b099 100644
--- a/src/3rdparty/v8/src/heap-profiler.cc
+++ b/src/3rdparty/v8/src/heap-profiler.cc
@@ -97,7 +97,7 @@ void HeapProfiler::StopHeapObjectsTracking() {
}
-void HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
}
@@ -158,8 +158,8 @@ void HeapProfiler::StartHeapObjectsTrackingImpl() {
}
-void HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
- snapshots_->PushHeapObjectsStats(stream);
+SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
+ return snapshots_->PushHeapObjectsStats(stream);
}
@@ -168,6 +168,14 @@ void HeapProfiler::StopHeapObjectsTrackingImpl() {
}
+size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
+ HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+ ASSERT(profiler != NULL);
+ size_t size = profiler->snapshots_->GetUsedMemorySize();
+ return size;
+}
+
+
int HeapProfiler::GetSnapshotsCount() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
diff --git a/src/3rdparty/v8/src/heap-profiler.h b/src/3rdparty/v8/src/heap-profiler.h
index 96b042d..346177b 100644
--- a/src/3rdparty/v8/src/heap-profiler.h
+++ b/src/3rdparty/v8/src/heap-profiler.h
@@ -49,6 +49,8 @@ class HeapProfiler {
static void SetUp();
static void TearDown();
+ static size_t GetMemorySizeUsedByProfiler();
+
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
@@ -58,7 +60,7 @@ class HeapProfiler {
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
- static void PushHeapObjectsStats(OutputStream* stream);
+ static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -89,7 +91,7 @@ class HeapProfiler {
void StartHeapObjectsTrackingImpl();
void StopHeapObjectsTrackingImpl();
- void PushHeapObjectsStatsImpl(OutputStream* stream);
+ SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
index f678517..ebf3ccd 100644
--- a/src/3rdparty/v8/src/heap.cc
+++ b/src/3rdparty/v8/src/heap.cc
@@ -48,6 +48,7 @@
#include "snapshot.h"
#include "store-buffer.h"
#include "v8threads.h"
+#include "v8utils.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
@@ -66,21 +67,26 @@ Heap::Heap()
: isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
- code_range_size_(0),
-#elif defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64)
#define LUMP_OF_MEMORY (2 * MB)
code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
code_range_size_(0),
#endif
+#if defined(ANDROID)
+ reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ initial_semispace_size_(Page::kPageSize),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
+#else
reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(256l * LUMP_OF_MEMORY),
+#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -92,6 +98,7 @@ Heap::Heap()
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
+ flush_monomorphic_ics_(false),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -134,6 +141,7 @@ Heap::Heap()
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
max_gc_pause_(0),
+ total_gc_time_ms_(0),
max_alive_after_gc_(0),
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
@@ -150,7 +158,8 @@ Heap::Heap()
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
promotion_queue_(this),
configured_(false),
- chunks_queued_for_free_(NULL) {
+ chunks_queued_for_free_(NULL),
+ relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -168,12 +177,14 @@ Heap::Heap()
}
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- global_contexts_list_ = NULL;
+ native_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
+
+ ClearObjectStats(true);
}
@@ -201,6 +212,20 @@ intptr_t Heap::CommittedMemory() {
lo_space_->Size();
}
+
+size_t Heap::CommittedPhysicalMemory() {
+ if (!HasBeenSetUp()) return 0;
+
+ return new_space_.CommittedPhysicalMemory() +
+ old_pointer_space_->CommittedPhysicalMemory() +
+ old_data_space_->CommittedPhysicalMemory() +
+ code_space_->CommittedPhysicalMemory() +
+ map_space_->CommittedPhysicalMemory() +
+ cell_space_->CommittedPhysicalMemory() +
+ lo_space_->CommittedPhysicalMemory();
+}
+
+
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
@@ -315,48 +340,59 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- isolate_->memory_allocator()->Size(),
- isolate_->memory_allocator()->Available());
- PrintF("New space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- Heap::new_space_.Size(),
- new_space_.Available());
- PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_pointer_space_->Size(),
- old_pointer_space_->Available(),
- old_pointer_space_->Waste());
- PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- old_data_space_->Size(),
- old_data_space_->Available(),
- old_data_space_->Waste());
- PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- code_space_->Size(),
- code_space_->Available(),
- code_space_->Waste());
- PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- map_space_->Size(),
- map_space_->Available(),
- map_space_->Waste());
- PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d"
- ", waste: %8" V8_PTR_PREFIX "d\n",
- cell_space_->Size(),
- cell_space_->Available(),
- cell_space_->Waste());
- PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
- ", available: %8" V8_PTR_PREFIX "d\n",
- lo_space_->Size(),
- lo_space_->Available());
+ PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB\n",
+ isolate_->memory_allocator()->Size() / KB,
+ isolate_->memory_allocator()->Available() / KB);
+ PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ new_space_.Size() / KB,
+ new_space_.Available() / KB,
+ new_space_.CommittedMemory() / KB);
+ PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ old_pointer_space_->SizeOfObjects() / KB,
+ old_pointer_space_->Available() / KB,
+ old_pointer_space_->CommittedMemory() / KB);
+ PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ old_data_space_->SizeOfObjects() / KB,
+ old_data_space_->Available() / KB,
+ old_data_space_->CommittedMemory() / KB);
+ PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ code_space_->SizeOfObjects() / KB,
+ code_space_->Available() / KB,
+ code_space_->CommittedMemory() / KB);
+ PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ map_space_->SizeOfObjects() / KB,
+ map_space_->Available() / KB,
+ map_space_->CommittedMemory() / KB);
+ PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ cell_space_->SizeOfObjects() / KB,
+ cell_space_->Available() / KB,
+ cell_space_->CommittedMemory() / KB);
+ PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ lo_space_->SizeOfObjects() / KB,
+ lo_space_->Available() / KB,
+ lo_space_->CommittedMemory() / KB);
+ PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
+ ", available: %6" V8_PTR_PREFIX "d KB"
+ ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ this->SizeOfObjects() / KB,
+ this->Available() / KB,
+ this->CommittedMemory() / KB);
+ PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
}
@@ -383,18 +419,23 @@ void Heap::GarbageCollectionPrologue() {
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
-#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
+ if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+ mark_compact_collector()->EnableCodeFlushing(true);
+ }
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
+
+#ifdef DEBUG
+ ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ allow_allocation(false);
if (FLAG_gc_verbose) Print();
-#endif // DEBUG
-#if defined(DEBUG)
ReportStatisticsBeforeGC();
#endif // DEBUG
@@ -402,6 +443,7 @@ void Heap::GarbageCollectionPrologue() {
store_buffer()->GCPrologue();
}
+
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
AllSpaces spaces;
@@ -411,17 +453,34 @@ intptr_t Heap::SizeOfObjects() {
return total;
}
+
+void Heap::RepairFreeListsAfterBoot() {
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RepairFreeListsAfterBoot();
+ }
+}
+
+
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
-#ifdef DEBUG
- allow_allocation(true);
- ZapFromSpace();
+ // In release mode, we only zap the from space under heap verification.
+ if (Heap::ShouldZapGarbage()) {
+ ZapFromSpace();
+ }
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
+#ifdef DEBUG
+ allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
@@ -435,6 +494,56 @@ void Heap::GarbageCollectionEpilogue() {
symbol_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
symbol_table()->NumberOfElements());
+
+ if (CommittedMemory() > 0) {
+ isolate_->counters()->external_fragmentation_total()->AddSample(
+ static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+
+ isolate_->counters()->heap_fraction_map_space()->AddSample(
+ static_cast<int>(
+ (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_cell_space()->AddSample(
+ static_cast<int>(
+ (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+
+ isolate_->counters()->heap_sample_total_committed()->AddSample(
+ static_cast<int>(CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_total_used()->AddSample(
+ static_cast<int>(SizeOfObjects() / KB));
+ isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+ static_cast<int>(map_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
+ static_cast<int>(cell_space()->CommittedMemory() / KB));
+ }
+
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+ isolate_->counters()->space##_bytes_available()->Set( \
+ static_cast<int>(space()->Available())); \
+ isolate_->counters()->space##_bytes_committed()->Set( \
+ static_cast<int>(space()->CommittedMemory())); \
+ isolate_->counters()->space##_bytes_used()->Set( \
+ static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+ if (space()->CommittedMemory() > 0) { \
+ isolate_->counters()->external_fragmentation_##space()->AddSample( \
+ static_cast<int>(100 - \
+ (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
+ }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+ UPDATE_COUNTERS_FOR_SPACE(space) \
+ UPDATE_FRAGMENTATION_FOR_SPACE(space)
+
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+
#if defined(DEBUG)
ReportStatisticsAfterGC();
#endif // DEBUG
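
The UPDATE_*_FOR_SPACE macros above stamp out identical counter updates for each space; external fragmentation is reported as the share of committed memory not occupied by live objects, i.e. 100 - used * 100 / committed. A compilable sketch of that computation and the macro pattern follows; the Space struct and names are invented for illustration.

#include <cstdio>

struct Space {
  long used;       // bytes occupied by live objects
  long committed;  // bytes committed from the OS
};

// Percentage of committed memory that is not live data.
static int ExternalFragmentation(const Space& s) {
  if (s.committed == 0) return 0;
  return static_cast<int>(100 - (s.used * 100.0) / s.committed);
}

#define REPORT_SPACE(name, space)                                    \
  std::printf(#name ": used=%ld committed=%ld fragmentation=%d%%\n", \
              (space).used, (space).committed,                       \
              ExternalFragmentation(space))

int main() {
  Space code_space = {800, 1000};
  Space map_space = {250, 1000};
  REPORT_SPACE(code_space, code_space);
  REPORT_SPACE(map_space, map_space);
  return 0;
}
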
@@ -542,10 +651,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
PerformGarbageCollection(collector, &tracer);
rate->Stop();
+ ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+
+ // This can do debug callbacks and restart incremental marking.
GarbageCollectionEpilogue();
}
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
incremental_marking()->Start();
@@ -566,7 +677,7 @@ void Heap::PerformScavenge() {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
public:
@@ -575,20 +686,18 @@ class SymbolTableVerifier : public ObjectVisitor {
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
// Check that the symbol is actually a symbol.
- ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+ CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
}
}
}
};
-#endif // DEBUG
static void VerifySymbolTable() {
-#ifdef DEBUG
SymbolTableVerifier verifier;
HEAP->symbol_table()->IterateElements(&verifier);
-#endif // DEBUG
}
+#endif // VERIFY_HEAP
static bool AbortIncrementalMarkingAndCollectGarbage(
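
The verifier hunk above also switches ASSERT to CHECK: under VERIFY_HEAP the condition must fire even in release builds, where ASSERT compiles away. A minimal sketch of the distinction between the two macros; this is simplified, and V8's real versions carry source locations and richer diagnostics.

#include <cstdio>
#include <cstdlib>

// CHECK aborts in every build; ASSERT only when DEBUG is defined.
#define CHECK(cond)                                      \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "CHECK failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (false)

#ifdef DEBUG
#define ASSERT(cond) CHECK(cond)
#else
#define ASSERT(cond) ((void)0)
#endif

int main() {
  ASSERT(1 + 1 == 2);  // no-op in release builds
  CHECK(1 + 1 == 2);   // always evaluated
  return 0;
}
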
@@ -603,67 +712,42 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
void Heap::ReserveSpace(
- int new_space_size,
- int pointer_space_size,
- int data_space_size,
- int code_space_size,
- int map_space_size,
- int cell_space_size,
- int large_object_size) {
- NewSpace* new_space = Heap::new_space();
- PagedSpace* old_pointer_space = Heap::old_pointer_space();
- PagedSpace* old_data_space = Heap::old_data_space();
- PagedSpace* code_space = Heap::code_space();
- PagedSpace* map_space = Heap::map_space();
- PagedSpace* cell_space = Heap::cell_space();
- LargeObjectSpace* lo_space = Heap::lo_space();
+ int *sizes,
+ Address *locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- if (!new_space->ReserveSpace(new_space_size)) {
- Heap::CollectGarbage(NEW_SPACE,
- "failed to reserve space in the new space");
- gc_performed = true;
- }
- if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
- AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
- "failed to reserve space in the old pointer space");
- gc_performed = true;
- }
- if (!(old_data_space->ReserveSpace(data_space_size))) {
- AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
- "failed to reserve space in the old data space");
- gc_performed = true;
- }
- if (!(code_space->ReserveSpace(code_space_size))) {
- AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
- "failed to reserve space in the code space");
- gc_performed = true;
- }
- if (!(map_space->ReserveSpace(map_space_size))) {
- AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
- "failed to reserve space in the map space");
- gc_performed = true;
- }
- if (!(cell_space->ReserveSpace(cell_space_size))) {
- AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
- "failed to reserve space in the cell space");
- gc_performed = true;
- }
- // We add a slack-factor of 2 in order to have space for a series of
- // large-object allocations that are only just larger than the page size.
- large_object_size *= 2;
- // The ReserveSpace method on the large object space checks how much
- // we can expand the old generation. This includes expansion caused by
- // allocation in the other spaces.
- large_object_size += cell_space_size + map_space_size + code_space_size +
- data_space_size + pointer_space_size;
- if (!(lo_space->ReserveSpace(large_object_size))) {
- AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
- "failed to reserve space in the large object space");
- gc_performed = true;
+ ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
+ for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
+ if (sizes[space] != 0) {
+ MaybeObject* allocation;
+ if (space == NEW_SPACE) {
+ allocation = new_space()->AllocateRaw(sizes[space]);
+ } else {
+ allocation = paged_space(space)->AllocateRaw(sizes[space]);
+ }
+ FreeListNode* node;
+ if (!allocation->To<FreeListNode>(&node)) {
+ if (space == NEW_SPACE) {
+ Heap::CollectGarbage(NEW_SPACE,
+ "failed to reserve space in the new space");
+ } else {
+ AbortIncrementalMarkingAndCollectGarbage(
+ this,
+ static_cast<AllocationSpace>(space),
+ "failed to reserve space in paged space");
+ }
+ gc_performed = true;
+ break;
+ } else {
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ node->set_size(this, sizes[space]);
+ locations_out[space] = node->address();
+ }
+ }
}
}
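
The rewritten ReserveSpace above takes an array of per-space sizes, attempts a raw allocation in each space, and on failure triggers a GC for that space and restarts the whole round, up to kThreshold attempts. A simplified sketch of the retry structure; the allocator and GC below are stand-ins, not V8 API.

#include <cstdio>

const int kSpaces = 7;
const int kThreshold = 20;

// Stand-in allocator: pretend the first attempt in space 2 fails.
static bool TryAllocate(int space, int size, int attempt) {
  return !(space == 2 && attempt == 0);
}

static void CollectGarbageFor(int space) {
  std::printf("GC to free space %d\n", space);
}

static bool ReserveSpace(const int sizes[kSpaces]) {
  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = 0; space < kSpaces; space++) {
      if (sizes[space] == 0) continue;
      if (!TryAllocate(space, sizes[space], counter - 1)) {
        CollectGarbageFor(space);
        gc_performed = true;
        break;  // restart the round after the GC
      }
    }
  }
  return !gc_performed;  // true if every reservation succeeded
}

int main() {
  int sizes[kSpaces] = {4096, 0, 8192, 0, 0, 0, 0};
  std::printf("reserved: %s\n", ReserveSpace(sizes) ? "yes" : "no");
  return 0;
}
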
@@ -691,7 +775,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
void Heap::ClearJSFunctionResultCaches() {
if (isolate_->bootstrapper()->IsActive()) return;
- Object* context = global_contexts_list_;
+ Object* context = native_contexts_list_;
while (!context->IsUndefined()) {
// Get the caches for this context. GC can happen when the context
// is not fully initialized, so the caches can be undefined.
@@ -718,7 +802,7 @@ void Heap::ClearNormalizedMapCaches() {
return;
}
- Object* context = global_contexts_list_;
+ Object* context = native_contexts_list_;
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -770,9 +854,12 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PROFILE(isolate_, CodeMovingGCEvent());
}
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifySymbolTable();
}
+#endif
+
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -847,8 +934,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// have to limit maximal capacity of the young generation.
new_space_high_promotion_mode_active_ = true;
if (FLAG_trace_gc) {
- PrintF("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialCapacity() / MB);
+ PrintPID("Limited new space size due to high promotion rate: %d MB\n",
+ new_space_.InitialCapacity() / MB);
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
@@ -858,8 +945,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// to grow again.
new_space_high_promotion_mode_active_ = false;
if (FLAG_trace_gc) {
- PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
+ PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
+ new_space_.MaximumCapacity() / MB);
}
}
@@ -899,9 +986,12 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifySymbolTable();
}
+#endif
return next_gc_likely_to_collect_more;
}
@@ -928,7 +1018,7 @@ void Heap::MarkCompact(GCTracer* tracer) {
contexts_disposed_ = 0;
- isolate_->set_context_exit_happened(false);
+ flush_monomorphic_ics_ = false;
}
@@ -938,7 +1028,8 @@ void Heap::MarkCompactPrologue() {
isolate_->keyed_lookup_cache()->Clear();
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
- StringSplitCache::Clear(string_split_cache());
+ RegExpResultsCache::Clear(string_split_cache());
+ RegExpResultsCache::Clear(regexp_multiple_cache());
isolate_->compilation_cache()->MarkCompactPrologue();
@@ -983,7 +1074,7 @@ class ScavengeVisitor: public ObjectVisitor {
};
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
@@ -991,7 +1082,7 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
+ CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
}
}
}
@@ -1016,7 +1107,7 @@ static void VerifyNonPointerSpacePointers() {
object->Iterate(&v);
}
}
-#endif
+#endif // VERIFY_HEAP
void Heap::CheckNewSpaceExpansionCriteria() {
@@ -1154,7 +1245,9 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
-#ifdef DEBUG
+ RelocationLock relocation_lock(this);
+
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -1220,20 +1313,32 @@ void Heap::Scavenge() {
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL; cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+ for (HeapObject* heap_object = cell_iterator.Next();
+ heap_object != NULL;
+ heap_object = cell_iterator.Next()) {
+ if (heap_object->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
+ Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
- // Scavenge object reachable from the global contexts list directly.
- scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+ // Copy objects reachable from the code flushing candidates list.
+ MarkCompactCollector* collector = mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+ }
+
+ // Scavenge object reachable from the native contexts list directly.
+ scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+ while (IterateObjectGroups(&scavenge_visitor)) {
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ }
+ isolate()->global_handles()->RemoveObjectGroups();
+
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
@@ -1274,6 +1379,51 @@ void Heap::Scavenge() {
}
+// TODO(mstarzinger): Unify this method with
+// MarkCompactCollector::MarkObjectGroups().
+bool Heap::IterateObjectGroups(ObjectVisitor* scavenge_visitor) {
+ List<ObjectGroup*>* object_groups =
+ isolate()->global_handles()->object_groups();
+
+ int last = 0;
+ bool changed = false;
+ for (int i = 0; i < object_groups->length(); i++) {
+ ObjectGroup* entry = object_groups->at(i);
+ ASSERT(entry != NULL);
+
+ Object*** objects = entry->objects_;
+ bool group_marked = false;
+ for (size_t j = 0; j < entry->length_; j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ if (!IsUnscavengedHeapObject(this, &object)) {
+ group_marked = true;
+ break;
+ }
+ }
+ }
+
+ if (!group_marked) {
+ (*object_groups)[last++] = entry;
+ continue;
+ }
+
+ for (size_t j = 0; j < entry->length_; ++j) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ scavenge_visitor->VisitPointer(&object);
+ changed = true;
+ }
+ }
+
+ entry->Dispose();
+ object_groups->at(i) = NULL;
+ }
+ object_groups->Rewind(last);
+ return changed;
+}
+
+
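
IterateObjectGroups above walks the global handles' object groups: a group with no scavenged member is kept, compacted toward the front via the last index and Rewind, while a group with at least one live member has all members visited and is disposed. A sketch of that keep-or-consume compaction on a plain vector; the types here are illustrative only.

#include <cstdio>
#include <vector>

struct Group {
  bool any_member_live;  // stand-in for the IsUnscavengedHeapObject scan
};

// Returns true if any group was consumed, i.e. more scavenging may follow.
static bool IterateGroups(std::vector<Group>& groups) {
  size_t last = 0;
  bool changed = false;
  for (size_t i = 0; i < groups.size(); i++) {
    if (!groups[i].any_member_live) {
      groups[last++] = groups[i];  // keep for a later pass
      continue;
    }
    // Visit the group's members here, then drop the group.
    changed = true;
  }
  groups.resize(last);  // the Rewind(last) step
  return changed;
}

int main() {
  std::vector<Group> groups = {{false}, {true}, {false}};
  IterateGroups(groups);
  std::printf("groups left: %zu\n", groups.size());
  return 0;
}
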
HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -1291,9 +1441,11 @@ HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
external_string_table_.Verify();
}
+#endif
if (external_string_table_.new_space_strings_.is_empty()) return;
@@ -1391,7 +1543,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* undefined = undefined_value();
Object* head = undefined;
Context* tail = NULL;
- Object* candidate = global_contexts_list_;
+ Object* candidate = native_contexts_list_;
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
@@ -1464,20 +1616,47 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
// Update the head of the list of contexts.
- global_contexts_list_ = head;
+ native_contexts_list_ = head;
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
- class VisitorAdapter : public ObjectVisitor {
+ // Both the external string table and the symbol table may contain
+ // external strings, but neither lists them exhaustively, and the two
+ // sets overlap. Therefore we iterate over the external string
+ // table first, ignoring symbols, and then over the symbol table.
+
+ class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
- explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
- : visitor_(visitor) {}
+ explicit ExternalStringTableVisitorAdapter(
+ v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ // Visit non-symbol external strings,
+ // since symbols are listed in the symbol table.
+ if (!(*p)->IsSymbol()) {
+ ASSERT((*p)->IsExternalString());
+ visitor_->VisitExternalString(Utils::ToLocal(
+ Handle<String>(String::cast(*p))));
+ }
+ }
+ }
+ private:
+ v8::ExternalResourceVisitor* visitor_;
+ } external_string_table_visitor(visitor);
+
+ external_string_table_.Iterate(&external_string_table_visitor);
+
+ class SymbolTableVisitorAdapter : public ObjectVisitor {
+ public:
+ explicit SymbolTableVisitorAdapter(
+ v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
+ ASSERT((*p)->IsSymbol());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
@@ -1485,8 +1664,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
private:
v8::ExternalResourceVisitor* visitor_;
- } visitor_adapter(visitor);
- external_string_table_.Iterate(&visitor_adapter);
+ } symbol_table_visitor(visitor);
+
+ symbol_table()->IterateElements(&symbol_table_visitor);
}
@@ -1590,7 +1770,7 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitGlobalContext,
+ table_.Register(kVisitNativeContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<Context::kSize>);
@@ -1676,7 +1856,7 @@ class ScavengingVisitor : public StaticVisitorBase {
RecordCopiedObject(heap, target);
HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
- if (isolate->logger()->is_logging() ||
+ if (isolate->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(isolate)) {
if (target->IsSharedFunctionInfo()) {
PROFILE(isolate, SharedFunctionInfoMoveEvent(
@@ -1984,9 +2164,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
@@ -1999,6 +2178,9 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
+ int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ Map::OwnsDescriptors::encode(true);
+ reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
return result;
}
@@ -2007,9 +2189,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
map->set_map_no_write_barrier(meta_map());
@@ -2021,20 +2202,17 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
- map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- map->init_prototype_transitions(undefined_value());
+ map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
+ map->set_instance_descriptors(empty_descriptor_array());
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
+ int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ Map::OwnsDescriptors::encode(true);
+ map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
- // If the map object is aligned fill the padding area with Smi 0 objects.
- if (Map::kPadStart < Map::kSize) {
- memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
- 0,
- Map::kSize - Map::kPadStart);
- }
return map;
}
@@ -2071,8 +2249,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
{ MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
if (!maybe_info->To(&info)) return maybe_info;
}
- info->set_ic_total_count(0);
- info->set_ic_with_type_info_count(0);
+ info->initialize_storage();
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2160,17 +2337,17 @@ bool Heap::CreateInitialMaps() {
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
- meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->init_prototype_transitions(undefined_value());
+ meta_map()->init_back_pointer(undefined_value());
+ meta_map()->set_instance_descriptors(empty_descriptor_array());
- fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->init_prototype_transitions(undefined_value());
+ fixed_array_map()->init_back_pointer(undefined_value());
+ fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
- oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->init_prototype_transitions(undefined_value());
+ oddball_map()->init_back_pointer(undefined_value());
+ oddball_map()->set_instance_descriptors(empty_descriptor_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2378,9 +2555,16 @@ bool Heap::CreateInitialMaps() {
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
- Map* global_context_map = Map::cast(obj);
- global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
- set_global_context_map(global_context_map);
+ set_global_context_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Map* native_context_map = Map::cast(obj);
+ native_context_map->set_dictionary_map(true);
+ native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
+ set_native_context_map(native_context_map);
{ MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
SharedFunctionInfo::kAlignedSize);
@@ -2469,7 +2653,7 @@ bool Heap::CreateApiObjects() {
// bottleneck to trap the Smi-only -> fast elements transition, and there
// appears to be no benefit in optimizing this case.
Map* new_neander_map = Map::cast(obj);
- new_neander_map->set_elements_kind(FAST_ELEMENTS);
+ new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@@ -2632,7 +2816,7 @@ bool Heap::CreateInitialObjects() {
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
{ MaybeObject* maybe_obj =
- AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
+ AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
if (!maybe_obj->ToObject(&obj)) return false;
}
hidden_symbol_ = String::cast(obj);
@@ -2693,18 +2877,33 @@ bool Heap::CreateInitialObjects() {
set_single_character_string_cache(FixedArray::cast(obj));
// Allocate cache for string split.
- { MaybeObject* maybe_obj =
- AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
+ { MaybeObject* maybe_obj = AllocateFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_string_split_cache(FixedArray::cast(obj));
+ { MaybeObject* maybe_obj = AllocateFixedArray(
+ RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_regexp_multiple_cache(FixedArray::cast(obj));
+
// Allocate cache for external strings pointing to native source code.
{ MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
if (!maybe_obj->ToObject(&obj)) return false;
}
set_natives_source_cache(FixedArray::cast(obj));
+ // Allocate object to hold object observation state.
+ { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_observation_state(JSObject::cast(obj));
+
// Handling of script id generation is in FACTORY->NewScript.
set_last_script_id(undefined_value());
@@ -2724,70 +2923,126 @@ bool Heap::CreateInitialObjects() {
}
-Object* StringSplitCache::Lookup(
- FixedArray* cache, String* string, String* pattern) {
- if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
- uint32_t hash = string->Hash();
- uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+ RootListIndex writable_roots[] = {
+ kStoreBufferTopRootIndex,
+ kStackLimitRootIndex,
+ kInstanceofCacheFunctionRootIndex,
+ kInstanceofCacheMapRootIndex,
+ kInstanceofCacheAnswerRootIndex,
+ kCodeStubsRootIndex,
+ kNonMonomorphicCacheRootIndex,
+ kPolymorphicCodeCacheRootIndex,
+ kLastScriptIdRootIndex,
+ kEmptyScriptRootIndex,
+ kRealStackLimitRootIndex,
+ kArgumentsAdaptorDeoptPCOffsetRootIndex,
+ kConstructStubDeoptPCOffsetRootIndex,
+ kGetterStubDeoptPCOffsetRootIndex,
+ kSetterStubDeoptPCOffsetRootIndex,
+ kSymbolTableRootIndex,
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
+ if (root_index == writable_roots[i])
+ return true;
+ }
+ return false;
+}
+
+
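
RootCanBeWrittenAfterInitialization above performs a linear scan over a small allowlist of root indices; with only a dozen entries that is simpler than a lookup table and just as fast. A tiny sketch of the pattern, with invented indices:

#include <cstddef>
#include <cstdio>

enum RootListIndex { kStackLimit, kCodeStubs, kSymbolTable, kReadOnlyThing };

static bool RootCanBeWritten(RootListIndex root) {
  static const RootListIndex writable[] = {kStackLimit, kCodeStubs,
                                           kSymbolTable};
  for (size_t i = 0; i < sizeof(writable) / sizeof(writable[0]); i++) {
    if (root == writable[i]) return true;
  }
  return false;
}

int main() {
  std::printf("%d %d\n", RootCanBeWritten(kCodeStubs),
              RootCanBeWritten(kReadOnlyThing));  // prints 1 0
  return 0;
}
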
+Object* RegExpResultsCache::Lookup(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ ResultsCacheType type) {
+ FixedArray* cache;
+ if (!key_string->IsSymbol()) return Smi::FromInt(0);
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ ASSERT(key_pattern->IsString());
+ if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
+ cache = heap->string_split_cache();
+ } else {
+ ASSERT(type == REGEXP_MULTIPLE_INDICES);
+ ASSERT(key_pattern->IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == string &&
- cache->get(index + kPatternOffset) == pattern) {
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
return cache->get(index + kArrayOffset);
}
- index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
- if (cache->get(index + kStringOffset) == string &&
- cache->get(index + kPatternOffset) == pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
return cache->get(index + kArrayOffset);
}
return Smi::FromInt(0);
}
-void StringSplitCache::Enter(Heap* heap,
- FixedArray* cache,
- String* string,
- String* pattern,
- FixedArray* array) {
- if (!string->IsSymbol() || !pattern->IsSymbol()) return;
- uint32_t hash = string->Hash();
- uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
+void RegExpResultsCache::Enter(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ FixedArray* value_array,
+ ResultsCacheType type) {
+ FixedArray* cache;
+ if (!key_string->IsSymbol()) return;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ ASSERT(key_pattern->IsString());
+ if (!key_pattern->IsSymbol()) return;
+ cache = heap->string_split_cache();
+ } else {
+ ASSERT(type == REGEXP_MULTIPLE_INDICES);
+ ASSERT(key_pattern->IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index + kStringOffset, string);
- cache->set(index + kPatternOffset, pattern);
- cache->set(index + kArrayOffset, array);
+ cache->set(index + kStringOffset, key_string);
+ cache->set(index + kPatternOffset, key_pattern);
+ cache->set(index + kArrayOffset, value_array);
} else {
uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index2 + kStringOffset, string);
- cache->set(index2 + kPatternOffset, pattern);
- cache->set(index2 + kArrayOffset, array);
+ cache->set(index2 + kStringOffset, key_string);
+ cache->set(index2 + kPatternOffset, key_pattern);
+ cache->set(index2 + kArrayOffset, value_array);
} else {
cache->set(index2 + kStringOffset, Smi::FromInt(0));
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index + kStringOffset, string);
- cache->set(index + kPatternOffset, pattern);
- cache->set(index + kArrayOffset, array);
+ cache->set(index + kStringOffset, key_string);
+ cache->set(index + kPatternOffset, key_pattern);
+ cache->set(index + kArrayOffset, value_array);
}
}
- if (array->length() < 100) { // Limit how many new symbols we want to make.
- for (int i = 0; i < array->length(); i++) {
- String* str = String::cast(array->get(i));
+ // If the array is a reasonably short list of substrings, convert it into a
+ // list of symbols.
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+ for (int i = 0; i < value_array->length(); i++) {
+ String* str = String::cast(value_array->get(i));
Object* symbol;
MaybeObject* maybe_symbol = heap->LookupSymbol(str);
if (maybe_symbol->ToObject(&symbol)) {
- array->set(i, symbol);
+ value_array->set(i, symbol);
}
}
}
- array->set_map_no_write_barrier(heap->fixed_cow_array_map());
+ // Convert backing store to a copy-on-write array.
+ value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}
-void StringSplitCache::Clear(FixedArray* cache) {
- for (int i = 0; i < kStringSplitCacheSize; i++) {
+void RegExpResultsCache::Clear(FixedArray* cache) {
+ for (int i = 0; i < kRegExpResultsCacheSize; i++) {
cache->set(i, Smi::FromInt(0));
}
}
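
The renamed RegExpResultsCache above keeps two candidate slots per key: the primary slot is derived from the string hash, and on collision a secondary slot one bucket further is probed; if both are occupied, the secondary entry is evicted and the new entry takes the primary slot. Below is a self-contained sketch of this two-slot probing scheme over a flat array; the sizes and field layout are deliberately simplified.

#include <cstdint>
#include <cstdio>
#include <string>

const uint32_t kCacheSize = 16;      // must be a power of two
const uint32_t kFieldsPerEntry = 2;  // key + value

static std::string cache[kCacheSize];

static uint32_t PrimaryIndex(uint32_t hash) {
  return (hash & (kCacheSize - 1)) & ~(kFieldsPerEntry - 1);
}

static void Enter(uint32_t hash, const std::string& key,
                  const std::string& value) {
  uint32_t index = PrimaryIndex(hash);
  if (cache[index].empty()) {
    cache[index] = key;
    cache[index + 1] = value;
    return;
  }
  uint32_t index2 = (index + kFieldsPerEntry) & (kCacheSize - 1);
  if (cache[index2].empty()) {
    cache[index2] = key;
    cache[index2 + 1] = value;
  } else {
    cache[index2].clear();  // evict the secondary entry
    cache[index2 + 1].clear();
    cache[index] = key;     // overwrite the primary entry
    cache[index + 1] = value;
  }
}

int main() {
  Enter(5, "a,b,c", "[a, b, c]");
  Enter(5, "x,y", "[x, y]");  // collides, takes the secondary slot
  std::printf("%s -> %s\n", cache[PrimaryIndex(5)].c_str(),
              cache[PrimaryIndex(5) + 1].c_str());
  return 0;
}
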
@@ -2817,7 +3072,7 @@ void Heap::AllocateFullSizeNumberStringCache() {
// The idea is to have a small number string cache in the snapshot to keep
// boot-time memory usage down. If we already have to expand the number
// string cache while creating the snapshot, that plan did not work out.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
MaybeObject* maybe_obj =
AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
Object* new_cache;
@@ -3005,6 +3260,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
+ share->ClearOptimizedCodeMap();
share->set_scope_info(ScopeInfo::Empty());
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -3017,8 +3273,8 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
- share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_ic_age(0);
+ share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -3050,6 +3306,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
}
JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message->initialize_elements();
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type);
message->set_arguments(arguments);
@@ -3274,7 +3531,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
}
ASSERT(buffer->IsFlat());
-#if DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
buffer->StringVerify();
}
@@ -3326,6 +3583,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException();
}
+ ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
+
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3489,17 +3748,27 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* maybe_result;
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
- if (obj_size > code_space()->AreaSize() || immovable) {
+ HeapObject* result;
+ bool force_lo_space = obj_size > code_space()->AreaSize();
+ if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
+ if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
- Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (immovable && !force_lo_space &&
+ // Objects on the first page of each space are never moved.
+ !code_space_->FirstPage()->Contains(result->address())) {
+ // Discard the first code allocation, which was on a page where it could be
+ // moved.
+ CreateFillerObjectAt(result->address(), obj_size);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
+ }
// Initialize the object
- HeapObject::cast(result)->set_map_no_write_barrier(code_map());
+ result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
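
CreateCode above now allocates in the regular code space first and falls back to large-object space only when an immovable object did not land on the space's first page, whose objects are never moved; the abandoned allocation is overwritten with a filler so the heap stays iterable. A hedged sketch of that decision order; the predicates below are stand-ins, not V8's page API.

#include <cstdio>

// Stand-ins for the page/space predicates used above.
static bool FitsOnCodePage(int size) { return size <= 4096; }
static bool OnFirstPage(void* p) { return false; }  // pretend it moved pages

static void* AllocateCode(int size, bool immovable) {
  static char code_space[1 << 16];
  static char lo_space[1 << 20];
  bool force_lo = !FitsOnCodePage(size);
  void* result = force_lo ? lo_space : code_space;
  if (immovable && !force_lo && !OnFirstPage(result)) {
    // Discard the movable allocation (a real heap would write a filler
    // object over it) and retry in large-object space, which never moves.
    result = lo_space;
  }
  return result;
}

int main() {
  std::printf("immovable code at %p\n", AllocateCode(128, true));
  return 0;
}
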
@@ -3526,7 +3795,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// through the self_reference parameter.
code->CopyFrom(desc);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
code->Verify();
}
@@ -3608,7 +3877,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
code->Verify();
}
@@ -3655,29 +3924,27 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// from the function's context, since the function can be from a
// different context.
JSFunction* object_function =
- function->context()->global_context()->object_function();
+ function->context()->native_context()->object_function();
// Each function prototype gets a copy of the object function map.
// This avoids unwanted sharing of maps between prototypes of different
// constructors.
Map* new_map;
ASSERT(object_function->has_initial_map());
- { MaybeObject* maybe_map =
- object_function->initial_map()->CopyDropTransitions();
- if (!maybe_map->To<Map>(&new_map)) return maybe_map;
- }
+ MaybeObject* maybe_map = object_function->initial_map()->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
+ MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+ if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+
// When creating the prototype for the function we must set its
// constructor to the function.
- Object* result;
- { MaybeObject* maybe_result =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_symbol(), function, DONT_ENUM);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_failure =
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ constructor_symbol(), function, DONT_ENUM);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
return prototype;
}
@@ -3707,12 +3974,12 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
!JSFunction::cast(callee)->shared()->is_classic_mode();
if (strict_mode_callee) {
boilerplate =
- isolate()->context()->global_context()->
+ isolate()->context()->native_context()->
strict_mode_arguments_boilerplate();
arguments_object_size = kArgumentsObjectSizeStrict;
} else {
boilerplate =
- isolate()->context()->global_context()->arguments_boilerplate();
+ isolate()->context()->native_context()->arguments_boilerplate();
arguments_object_size = kArgumentsObjectSize;
}
@@ -3751,7 +4018,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
- ASSERT(JSObject::cast(result)->HasFastElements());
+ ASSERT(JSObject::cast(result)->HasFastObjectElements());
return result;
}
@@ -3778,25 +4045,22 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// suggested by the function.
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
- Object* map_obj;
- { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
- if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
- }
+ Map* map;
+ MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+ if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
Object* prototype;
if (fun->has_instance_prototype()) {
prototype = fun->instance_prototype();
} else {
- { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- }
+ MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
+ if (!maybe_prototype->To(&prototype)) return maybe_prototype;
}
- Map* map = Map::cast(map_obj);
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
- ASSERT(map->has_fast_elements());
+ ASSERT(map->has_fast_object_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@@ -3811,21 +4075,17 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
- if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
- return maybe_descriptors_obj;
- }
- }
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
+ if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+
DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
- FieldDescriptor field(name, i, NONE);
- field.SetEnumerationIndex(i);
+ FieldDescriptor field(name, i, NONE, i + 1);
descriptors->Set(i, &field, witness);
}
- descriptors->SetNextEnumerationIndex(count);
- descriptors->SortUnchecked(witness);
+ descriptors->Sort();
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
@@ -3834,7 +4094,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
if (HasDuplicates(descriptors)) {
fun->shared()->ForbidInlineConstructor();
} else {
- map->set_instance_descriptors(descriptors);
+ map->InitializeDescriptors(descriptors);
map->set_pre_allocated_property_fields(count);
map->set_unused_property_fields(in_object_properties - count);
}
@@ -3913,8 +4173,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
- JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
return obj;
}
@@ -3942,13 +4201,18 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateJSModule() {
+MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
if (!maybe_map->To(&map)) return maybe_map;
// Allocate the object based on the map.
- return AllocateJSObjectFromMap(map, TENURED);
+ JSModule* module;
+ MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
+ if (!maybe_module->To(&module)) return maybe_module;
+ module->set_context(context);
+ module->set_scope_info(scope_info);
+ return module;
}
@@ -3959,6 +4223,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
+ if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@@ -3979,8 +4246,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
- ASSERT(elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS);
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
@@ -4006,6 +4272,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
+ array->ValidateElements();
return array;
}
@@ -4059,6 +4326,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();
+ ASSERT(map->is_dictionary_map());
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
@@ -4076,13 +4344,11 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
// Allocate a dictionary object for backing storage.
- Object* obj;
- { MaybeObject* maybe_obj =
- StringDictionary::Allocate(
- map->NumberOfDescribedProperties() * 2 + initial_size);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- StringDictionary* dictionary = StringDictionary::cast(obj);
+ StringDictionary* dictionary;
+ MaybeObject* maybe_dictionary =
+ StringDictionary::Allocate(
+ map->NumberOfOwnDescriptors() * 2 + initial_size);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
@@ -4090,36 +4356,32 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ details.descriptor_index());
Object* value = descs->GetCallbacksObject(i);
- { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
+ MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
+ if (!maybe_value->ToObject(&value)) return maybe_value;
- Object* result;
- { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
+ MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
+ if (!maybe_added->To(&dictionary)) return maybe_added;
}
// Allocate the global object and initialize it with the backing store.
- { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- JSObject* global = JSObject::cast(obj);
+ JSObject* global;
+ MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
+ if (!maybe_global->To(&global)) return maybe_global;
+
InitializeJSObjectFromMap(global, dictionary, map);
// Create a new map for the global object.
- { MaybeObject* maybe_obj = map->CopyDropDescriptors();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- Map* new_map = Map::cast(obj);
+ Map* new_map;
+ MaybeObject* maybe_map = map->CopyDropDescriptors();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ new_map->set_dictionary_map(true);
// Set up the global object as a normalized object.
global->set_map(new_map);
- global->map()->clear_instance_descriptors();
global->set_properties(dictionary);
// Make sure result is a global object with properties in dictionary.
@@ -4248,7 +4510,7 @@ MaybeObject* Heap::ReinitializeJSReceiver(
map->set_function_with_prototype(true);
InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
JSFunction::cast(object)->set_context(
- isolate()->context()->global_context());
+ isolate()->context()->native_context());
}
// Put in filler if the new object is smaller than the old.
@@ -4289,7 +4551,8 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
- if (string.length() == 1) {
+ int length = string.length();
+ if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(string[0]);
}
Object* result;
@@ -4299,22 +4562,20 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
}
// Copy the characters into the new object.
- SeqAsciiString* string_result = SeqAsciiString::cast(result);
- for (int i = 0; i < string.length(); i++) {
- string_result->SeqAsciiStringSet(i, string[i]);
- }
+ CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
return result;
}
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
+ int non_ascii_start,
PretenureFlag pretenure) {
- // Count the number of characters in the UTF-8 string and check if
- // it is an ASCII string.
+ // Continue counting the number of characters in the UTF-8 string, starting
+ // from the first non-ASCII character or word.
+ int chars = non_ascii_start;
Access<UnicodeCache::Utf8Decoder>
decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start(), string.length());
- int chars = 0;
+ decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
while (decoder->has_more()) {
uint32_t r = decoder->GetNext();
if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
@@ -4330,16 +4591,16 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
}
// Convert and copy the characters into the new object.
- String* string_result = String::cast(result);
+ SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
decoder->Reset(string.start(), string.length());
int i = 0;
while (i < chars) {
uint32_t r = decoder->GetNext();
if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
- string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
+ twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
+ twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
} else {
- string_result->Set(i++, r);
+ twobyte->SeqTwoByteStringSet(i++, r);
}
}
return result;
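
AllocateStringFromUtf8Slow counts UTF-16 code units before allocating: code points up to kMaxNonSurrogateCharCode take one unit, and anything above (outside the BMP) takes a surrogate pair and therefore two. A standalone sketch of that counting pass over already-decoded code points; the decoder itself is elided and stands in for V8's Utf8Decoder.

#include <cstdio>
#include <vector>

const unsigned kMaxNonSurrogateCharCode = 0xFFFF;

// Count the UTF-16 code units needed for a sequence of code points.
static int Utf16Length(const std::vector<unsigned>& code_points) {
  int chars = 0;
  for (unsigned cp : code_points) {
    chars += (cp <= kMaxNonSurrogateCharCode) ? 1 : 2;  // pair for astral
  }
  return chars;
}

int main() {
  // "A" (1 unit), U+1F600 (2 units: lead + trail surrogate).
  std::vector<unsigned> cps = {0x41, 0x1F600};
  std::printf("UTF-16 units: %d\n", Utf16Length(cps));  // prints 3
  return 0;
}
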
@@ -4349,20 +4610,18 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
PretenureFlag pretenure) {
// Check if the string is an ASCII string.
- MaybeObject* maybe_result;
- if (String::IsAscii(string.start(), string.length())) {
- maybe_result = AllocateRawAsciiString(string.length(), pretenure);
- } else { // It's not an ASCII string.
- maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
- }
Object* result;
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ int length = string.length();
+ const uc16* start = string.start();
- // Copy the characters into the new object, which may be either ASCII or
- // UTF-16.
- String* string_result = String::cast(result);
- for (int i = 0; i < string.length(); i++) {
- string_result->Set(i, string[i]);
+ if (String::IsAscii(start, length)) {
+ MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
+ } else { // It's not an ASCII string.
+ MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
}
return result;
}
@@ -4491,6 +4750,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ // Initialize string's content to ensure ASCII-ness (character range 0-127)
+ // as required when verifying the heap.
+ char* dest = SeqAsciiString::cast(result)->GetChars();
+ memset(dest, 0x0F, length * kCharSize);
+ }
+#endif
+
return result;
}
@@ -4534,16 +4803,16 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
MaybeObject* Heap::AllocateJSArray(
ElementsKind elements_kind,
PretenureFlag pretenure) {
- Context* global_context = isolate()->context()->global_context();
- JSFunction* array_function = global_context->array_function();
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- map = Map::cast(global_context->double_js_array_map());
- } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
- map = Map::cast(global_context->object_js_array_map());
- } else {
- ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
- ASSERT(map == global_context->smi_js_array_map());
+ Object* maybe_map_array = native_context->js_array_maps();
+ if (!maybe_map_array->IsUndefined()) {
+ Object* maybe_transitioned_map =
+ FixedArray::cast(maybe_map_array)->get(elements_kind);
+ if (!maybe_transitioned_map->IsUndefined()) {
+ map = Map::cast(maybe_transitioned_map);
+ }
}
return AllocateJSObjectFromMap(map, pretenure);
@@ -4820,35 +5089,50 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateGlobalContext() {
+MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
- AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
+ AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(global_context_map());
- context->set_smi_js_array_map(undefined_value());
- context->set_double_js_array_map(undefined_value());
- context->set_object_js_array_map(undefined_value());
- ASSERT(context->IsGlobalContext());
+ context->set_map_no_write_barrier(native_context_map());
+ context->set_js_array_maps(undefined_value());
+ ASSERT(context->IsNativeContext());
ASSERT(result->IsContext());
return result;
}
-MaybeObject* Heap::AllocateModuleContext(Context* previous,
+MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
ScopeInfo* scope_info) {
Object* result;
{ MaybeObject* maybe_result =
- AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+ AllocateFixedArray(scope_info->ContextLength(), TENURED);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_no_write_barrier(module_context_map());
- context->set_previous(previous);
+ context->set_map_no_write_barrier(global_context_map());
+ context->set_closure(function);
+ context->set_previous(function->context());
context->set_extension(scope_info);
- context->set_global(previous->global());
+ context->set_global_object(function->context()->global_object());
+ ASSERT(context->IsGlobalContext());
+ ASSERT(result->IsContext());
+ return context;
+}
+
+
+MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateFixedArray(scope_info->ContextLength(), TENURED);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Context* context = reinterpret_cast<Context*>(result);
+ context->set_map_no_write_barrier(module_context_map());
+ // Context links will be set later.
+ context->set_extension(Smi::FromInt(0));
return context;
}
@@ -4863,9 +5147,9 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
context->set_map_no_write_barrier(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
- context->set_extension(NULL);
- context->set_global(function->context()->global());
- context->set_qml_global(function->context()->qml_global());
+ context->set_extension(Smi::FromInt(0));
+ context->set_global_object(function->context()->global_object());
+ context->set_qml_global_object(function->context()->qml_global_object());
return context;
}
@@ -4885,8 +5169,8 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
- context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
+ context->set_global_object(previous->global_object());
+ context->set_qml_global_object(previous->qml_global_object());
context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
return context;
}
@@ -4904,8 +5188,8 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
- context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
+ context->set_global_object(previous->global_object());
+ context->set_qml_global_object(previous->qml_global_object());
return context;
}
@@ -4923,8 +5207,8 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
- context->set_global(previous->global());
- context->set_qml_global(previous->qml_global());
+ context->set_global_object(previous->global_object());
+ context->set_qml_global_object(previous->qml_global_object());
return context;
}
@@ -4998,12 +5282,17 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
bool Heap::IdleNotification(int hint) {
+ // Hints greater than this value indicate that
+ // the embedder is requesting a lot of GC work.
const int kMaxHint = 1000;
+ // Minimal hint that allows a full GC to be performed.
+ const int kMinHintForFullGC = 100;
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
// The size factor is in range [5..250]. The numbers here are chosen from
// experiments. If you change them, make sure to test with
// chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
- intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+ intptr_t step_size =
+ size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
if (hint >= kMaxHint) {
@@ -5066,16 +5355,30 @@ bool Heap::IdleNotification(int hint) {
mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
ms_count_at_last_idle_notification_ = ms_count_;
- if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
+ mark_sweeps_since_idle_round_started_;
+
+ if (remaining_mark_sweeps <= 0) {
FinishIdleRound();
return true;
}
if (incremental_marking()->IsStopped()) {
- incremental_marking()->Start();
+ // If there are no more than two GCs left in this idle round and we are
+ // allowed to do a full GC, then make those GCs full in order to compact
+ // the code space.
+ // TODO(ulan): Once we enable code compaction for incremental marking,
+ // we can get rid of this special case and always start incremental marking.
+ if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: finalize idle round");
+ } else {
+ incremental_marking()->Start();
+ }
+ }
+ if (!incremental_marking()->IsStopped()) {
+ AdvanceIdleIncrementalMarking(step_size);
}
-
- AdvanceIdleIncrementalMarking(step_size);
return false;
}
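// A standalone sketch of the decision logic above. kMaxMarkSweepsInIdleRound
// is the per-round budget declared elsewhere in heap.h; the value below is an
// assumption for illustration. Note that when marking is (re)started, the
// same notification also advances it before returning.

enum IdleAction {
  kFinishRound,               // budget exhausted: FinishIdleRound()
  kFullCompactingGC,          // finish the round while compacting code space
  kStartIncrementalMarking,
  kAdvanceIncrementalMarking
};

static IdleAction ChooseIdleAction(int mark_sweeps_this_round, int hint,
                                   bool marking_stopped) {
  const int kMaxMarkSweepsInIdleRound = 7;  // assumed budget
  const int kMinHintForFullGC = 100;        // as defined above
  int remaining = kMaxMarkSweepsInIdleRound - mark_sweeps_this_round;
  if (remaining <= 0) return kFinishRound;
  if (marking_stopped) {
    return (remaining <= 2 && hint >= kMinHintForFullGC)
               ? kFullCompactingGC
               : kStartIncrementalMarking;
  }
  return kAdvanceIncrementalMarking;
}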
@@ -5249,9 +5552,9 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void Heap::Verify() {
- ASSERT(HasBeenSetUp());
+ CHECK(HasBeenSetUp());
store_buffer()->Verify();
@@ -5269,38 +5572,8 @@ void Heap::Verify() {
cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
-
- VerifyNoAccessorPairSharing();
-}
-
-
-void Heap::VerifyNoAccessorPairSharing() {
- // Verification is done in 2 phases: First we mark all AccessorPairs, checking
- // that we mark only unmarked pairs, then we clear all marks, restoring the
- // initial state. We use the Smi tag of the AccessorPair's getter as the
- // marking bit, because we can never see a Smi as the getter.
- for (int phase = 0; phase < 2; phase++) {
- HeapObjectIterator iter(map_space());
- for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
- if (obj->IsMap()) {
- DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetType(i) == CALLBACKS &&
- descs->GetValue(i)->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
- uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
- uintptr_t after = (phase == 0) ?
- ((before & ~kSmiTagMask) | kSmiTag) :
- ((before & ~kHeapObjectTag) | kHeapObjectTag);
- CHECK(before != after);
- accessors->set_getter(reinterpret_cast<Object*>(after));
- }
- }
- }
- }
- }
}
-#endif // DEBUG
+#endif
MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
@@ -5393,7 +5666,6 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
}
-#ifdef DEBUG
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -5406,7 +5678,6 @@ void Heap::ZapFromSpace() {
}
}
}
-#endif // DEBUG
void Heap::IterateAndMarkPointersToFromSpace(Address start,
@@ -5656,6 +5927,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
+ isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
@@ -5718,8 +5990,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
if (max_semispace_size < Page::kPageSize) {
max_semispace_size = Page::kPageSize;
if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
+ PrintPID("Max semispace size cannot be less than %dkbytes\n",
+ Page::kPageSize >> 10);
}
}
max_semispace_size_ = max_semispace_size;
@@ -5734,8 +6006,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
if (max_semispace_size_ > reserved_semispace_size_) {
max_semispace_size_ = reserved_semispace_size_;
if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be more than %dkbytes\n",
- reserved_semispace_size_ >> 10);
+ PrintPID("Max semispace size cannot be more than %dkbytes\n",
+ reserved_semispace_size_ >> 10);
}
}
} else {
@@ -5760,7 +6032,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
- external_allocation_limit_ = 10 * max_semispace_size_;
+ external_allocation_limit_ = 16 * max_semispace_size_;
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
@@ -6110,7 +6382,7 @@ bool Heap::SetUp(bool create_heap_objects) {
// Create initial objects
if (!CreateInitialObjects()) return false;
- global_contexts_list_ = undefined_value();
+ native_contexts_list_ = undefined_value();
}
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -6118,6 +6390,8 @@ bool Heap::SetUp(bool create_heap_objects) {
store_buffer()->SetUp();
+ if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+
return true;
}
@@ -6140,16 +6414,18 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
+
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
PrintF("max_gc_pause=%d ", get_max_gc_pause());
+ PrintF("total_gc_time=%d ", total_gc_time_ms_);
PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
get_max_alive_after_gc());
@@ -6203,6 +6479,8 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
+ delete relocation_mutex_;
+
#ifdef DEBUG
delete debug_utils_;
debug_utils_ = NULL;
@@ -6620,7 +6898,7 @@ void PathTracer::TracePathFrom(Object** root) {
ASSERT((search_target_ == kAnyGlobalObject) ||
search_target_->IsHeapObject());
found_target_in_trace_ = false;
- object_stack_.Clear();
+ Reset();
MarkVisitor mark_visitor(this);
MarkRecursively(root, &mark_visitor);
@@ -6632,8 +6910,8 @@ void PathTracer::TracePathFrom(Object** root) {
}
-static bool SafeIsGlobalContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+static bool SafeIsNativeContext(HeapObject* obj) {
+ return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}
@@ -6655,7 +6933,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
return;
}
- bool is_global_context = SafeIsGlobalContext(obj);
+ bool is_native_context = SafeIsNativeContext(obj);
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6665,7 +6943,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
// Scan the object body.
- if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+ if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
// This is specialized to scan Context's properly.
Object** start = reinterpret_cast<Object**>(obj->address() +
Context::kHeaderSize);
@@ -6724,11 +7002,7 @@ void PathTracer::ProcessResults() {
for (int i = 0; i < object_stack_.length(); i++) {
if (i > 0) PrintF("\n |\n |\n V\n\n");
Object* obj = object_stack_[i];
-#ifdef OBJECT_PRINT
obj->Print();
-#else
- obj->ShortPrint();
-#endif
}
PrintF("=====================================\n");
}
@@ -6737,6 +7011,15 @@ void PathTracer::ProcessResults() {
#ifdef DEBUG
+// Triggers a depth-first traversal of reachable objects from one
+// given root object and finds a path to a specific heap object and
+// prints it.
+void Heap::TracePathToObjectFrom(Object* target, Object* root) {
+ PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+ tracer.VisitPointer(&root);
+}
+
+
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
@@ -6824,6 +7107,7 @@ GCTracer::~GCTracer() {
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
+ heap_->total_gc_time_ms_ += time;
heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
heap_->alive_after_last_gc_);
@@ -6831,9 +7115,13 @@ GCTracer::~GCTracer() {
heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
static_cast<int>(spent_in_mutator_));
}
+ } else if (FLAG_trace_gc_verbose) {
+ heap_->total_gc_time_ms_ += time;
}
- PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+ if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
+
+ PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
if (!FLAG_trace_gc_nvp) {
int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
@@ -6875,9 +7163,7 @@ GCTracer::~GCTracer() {
PrintF(".\n");
} else {
PrintF("pause=%d ", time);
- PrintF("mutator=%d ",
- static_cast<int>(spent_in_mutator_));
-
+ PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
PrintF("gc=");
switch (collector_) {
case SCAVENGER:
@@ -7004,7 +7290,7 @@ void KeyedLookupCache::Clear() {
void DescriptorLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
+ for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
@@ -7044,7 +7330,7 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ if (new_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
if (heap_->InNewSpace(new_space_strings_[i])) {
@@ -7056,16 +7342,18 @@ void ExternalStringTable::CleanUp() {
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+ if (old_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
@@ -7156,4 +7444,63 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
+
+void Heap::ClearObjectStats(bool clear_last_time_stats) {
+ memset(object_counts_, 0, sizeof(object_counts_));
+ memset(object_sizes_, 0, sizeof(object_sizes_));
+ if (clear_last_time_stats) {
+ memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+ memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+ }
+}
+
+
+static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void Heap::CheckpointObjectStats() {
+ ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ counters->count_of_##name()->Increment( \
+ static_cast<int>(object_counts_[name])); \
+ counters->count_of_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[name])); \
+ counters->size_of_##name()->Increment( \
+ static_cast<int>(object_sizes_[name])); \
+ counters->size_of_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[name]));
+ INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+ int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
+ counters->count_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
+ counters->count_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+ memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ ClearObjectStats();
+}
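// The macros above publish per-type deltas: each counter is incremented by
// the current total and decremented by the total recorded at the previous
// checkpoint, i.e. it accumulates (current - last) per interval. The same
// pattern with a single plain counter (names hypothetical):

#include <stddef.h>

struct Counter {
  int value;
  void Increment(int d) { value += d; }
  void Decrement(int d) { value -= d; }
};

static void Checkpoint(Counter* c, size_t* current, size_t* last) {
  c->Increment(static_cast<int>(*current));  // add this interval's total
  c->Decrement(static_cast<int>(*last));     // cancel the previous total
  *last = *current;                          // roll the checkpoint forward
  *current = 0;                              // start counting afresh
}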
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap.h b/src/3rdparty/v8/src/heap.h
index 0e744e4..12cd295 100644
--- a/src/3rdparty/v8/src/heap.h
+++ b/src/3rdparty/v8/src/heap.h
@@ -64,7 +64,7 @@ namespace internal {
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
- V(Map, global_context_map, GlobalContextMap) \
+ V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(Map, scope_info_map, ScopeInfoMap) \
@@ -87,6 +87,7 @@ namespace internal {
V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
+ V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
V(Map, string_map, StringMap) \
@@ -130,6 +131,7 @@ namespace internal {
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, module_context_map, ModuleContextMap) \
+ V(Map, global_context_map, GlobalContextMap) \
V(Map, oddball_map, OddballMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
@@ -150,7 +152,10 @@ namespace internal {
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
+ V(JSObject, observation_state, ObservationState)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -172,6 +177,7 @@ namespace internal {
V(constructor_symbol, "constructor") \
V(code_symbol, ".code") \
V(result_symbol, ".result") \
+ V(dot_for_symbol, ".for.") \
V(catch_var_symbol, ".catch-var") \
V(empty_symbol, "") \
V(eval_symbol, "eval") \
@@ -241,7 +247,8 @@ namespace internal {
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \
- V(compare_ic_symbol, ".compare_ic") \
+ V(compare_ic_symbol, "==") \
+ V(strict_compare_ic_symbol, "===") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity") \
V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \
@@ -486,6 +493,9 @@ class Heap {
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
+ // Returns the amount of physical memory currently committed for the heap.
+ size_t CommittedPhysicalMemory();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -508,6 +518,24 @@ class Heap {
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ PagedSpace* paged_space(int idx) {
+ switch (idx) {
+ case OLD_POINTER_SPACE:
+ return old_pointer_space();
+ case OLD_DATA_SPACE:
+ return old_data_space();
+ case MAP_SPACE:
+ return map_space();
+ case CELL_SPACE:
+ return cell_space();
+ case CODE_SPACE:
+ return code_space();
+ case NEW_SPACE:
+ case LO_SPACE:
+ UNREACHABLE();
+ }
+ return NULL;
+ }
bool always_allocate() { return always_allocate_scope_depth_ != 0; }
Address always_allocate_scope_depth_address() {
@@ -535,7 +563,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSModule();
+ MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
+ ScopeInfo* scope_info);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
@@ -626,7 +655,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
- ElementsKind elements_kind = FAST_ELEMENTS);
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -656,6 +685,9 @@ class Heap {
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
+ // For use during bootup.
+ void RepairFreeListsAfterBoot();
+
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
@@ -682,6 +714,7 @@ class Heap {
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
Vector<const char> str,
+ int non_ascii_start,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
Vector<const uc16> str,
@@ -825,13 +858,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateHashTable(
int length, PretenureFlag pretenure = NOT_TENURED);
- // Allocate a global (but otherwise uninitialized) context.
- MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
+ // Allocate a native (but otherwise uninitialized) context.
+ MUST_USE_RESULT MaybeObject* AllocateNativeContext();
- // Allocate a module context.
- MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
+ // Allocate a global context.
+ MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function,
ScopeInfo* scope_info);
+ // Allocate a module context.
+ MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
+
// Allocate a function context.
MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
JSFunction* function);
@@ -1077,7 +1113,10 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() { return ++contexts_disposed_; }
+ int NotifyContextDisposed() {
+ flush_monomorphic_ics_ = true;
+ return ++contexts_disposed_;
+ }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -1105,8 +1144,8 @@ class Heap {
#endif
void AddGCPrologueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCEpilogueCallback callback);
+ GCPrologueCallback callback, GCType gc_type_filter);
+ void RemoveGCPrologueCallback(GCPrologueCallback callback);
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
@@ -1153,13 +1192,13 @@ class Heap {
// not match the empty string.
String* hidden_symbol() { return hidden_symbol_; }
- void set_global_contexts_list(Object* object) {
- global_contexts_list_ = object;
+ void set_native_contexts_list(Object* object) {
+ native_contexts_list_ = object;
}
- Object* global_contexts_list() { return global_contexts_list_; }
+ Object* native_contexts_list() { return native_contexts_list_; }
// Number of mark-sweeps.
- int ms_count() { return ms_count_; }
+ unsigned int ms_count() { return ms_count_; }
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
@@ -1230,21 +1269,19 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
- // Get address of global contexts list for serialization support.
- Object** global_contexts_list_address() {
- return &global_contexts_list_;
+ // Get address of native contexts list for serialization support.
+ Object** native_contexts_list_address() {
+ return &native_contexts_list_;
}
-#ifdef DEBUG
- void Print();
- void PrintHandles();
-
+#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
+#endif
- // Verify that AccessorPairs are not shared, i.e. make sure that they have
- // exactly one pointer to them.
- void VerifyNoAccessorPairSharing();
+#ifdef DEBUG
+ void Print();
+ void PrintHandles();
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
@@ -1253,10 +1290,23 @@ class Heap {
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
+#endif
+
+ // Zapping is needed for heap verification and is always done in debug builds.
+ static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+ return true;
+#else
+#ifdef VERIFY_HEAP
+ return FLAG_verify_heap;
+#else
+ return false;
+#endif
+#endif
+ }
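// In other words: debug builds always zap; release builds with VERIFY_HEAP
// compiled in zap only when --verify-heap is passed; plain release builds
// never zap.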
// Fill in bogus values in from space
void ZapFromSpace();
-#endif
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -1294,6 +1344,7 @@ class Heap {
return disallow_allocation_failure_;
}
+ void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
#endif
@@ -1308,20 +1359,9 @@ class Heap {
// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();
- // Support for partial snapshots. After calling this we can allocate a
- // certain number of bytes using only linear allocation (with a
- // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
- // or causing a GC. It returns true of space was reserved or false if a GC is
- // needed. For paged spaces the space requested must include the space wasted
- // at the end of each page when allocating linearly.
- void ReserveSpace(
- int new_space_size,
- int pointer_space_size,
- int data_space_size,
- int code_space_size,
- int map_space_size,
- int cell_space_size,
- int large_object_size);
+ // Support for partial snapshots. After calling this we have a linear
+ // allocation area in each space into which objects can be written.
+ void ReserveSpace(int* sizes, Address* addresses);
//
// Support for the API.
@@ -1397,15 +1437,15 @@ class Heap {
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_DECLARATION
+ // Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
kSymbolTableRootIndex,
kStrongRootListLength = kSymbolTableRootIndex,
kRootListLength
@@ -1417,6 +1457,10 @@ class Heap {
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+ // Generated code can embed direct references to non-writable roots if
+ // they are in new space.
+ static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1488,13 +1532,6 @@ class Heap {
void ClearNormalizedMapCaches();
- // Clears the cache of ICs related to this map.
- void ClearCacheOnMap(Map* map) {
- if (FLAG_cleanup_code_caches_at_gc) {
- map->ClearCodeCache(this);
- }
- }
-
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
@@ -1592,6 +1629,16 @@ class Heap {
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
+ void SetGetterStubDeoptPCOffset(int pc_offset) {
+ ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+ }
+
+ void SetSetterStubDeoptPCOffset(int pc_offset) {
+ ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+ }
+
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -1602,9 +1649,65 @@ class Heap {
}
void AgeInlineCaches() {
- ++global_ic_age_;
+ global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+ }
+
+ bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
+
+ intptr_t amount_of_external_allocated_memory() {
+ return amount_of_external_allocated_memory_;
+ }
+
+ // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+ // stored in a contiguous linear buffer. Stats groups are stored one after
+ // another.
+ enum {
+ FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+ FIRST_FIXED_ARRAY_SUB_TYPE =
+ FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
+ OBJECT_STATS_COUNT =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
+ };
+
+ void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
+ ASSERT(type <= LAST_TYPE);
+ if (sub_type < 0) {
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ } else {
+ if (type == CODE_TYPE) {
+ ASSERT(sub_type <= Code::LAST_CODE_KIND);
+ object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
+ object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
+ } else if (type == FIXED_ARRAY_TYPE) {
+ ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
+ }
+ }
}
+ void CheckpointObjectStats();
+
+ // We don't use a ScopedLock here since we want to lock the heap
+ // only when FLAG_parallel_recompilation is true.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+ ~RelocationLock() {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Unlock();
+ }
+ }
+
+ private:
+ Heap* heap_;
+ };
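// A minimal usage sketch for the scope above (assuming a Heap* named heap is
// in scope; not part of the patch). The constructor takes the mutex only
// when FLAG_parallel_recompilation is set, so single-threaded configurations
// pay nothing:
//
//   {
//     Heap::RelocationLock relocation_lock(heap);
//     // ... move code objects while the recompilation thread is excluded ...
//   }  // mutex released when the scope ends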
+
private:
Heap();
@@ -1636,6 +1739,8 @@ class Heap {
int global_ic_age_;
+ bool flush_monomorphic_ics_;
+
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
@@ -1657,7 +1762,7 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
intptr_t PromotedExternalMemorySize();
- int ms_count_; // how many mark-sweep collections happened
+ unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
// For post mortem debugging.
@@ -1729,7 +1834,7 @@ class Heap {
// last GC.
int old_gen_exhausted_;
- Object* global_contexts_list_;
+ Object* native_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1807,6 +1912,7 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
+ bool IterateObjectGroups(ObjectVisitor* scavenge_visitor);
inline void UpdateOldSpaceLimits();
@@ -1998,14 +2104,24 @@ class Heap {
void AdvanceIdleIncrementalMarking(intptr_t step_size);
+ void ClearObjectStats(bool clear_last_time_stats = false);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
+ // Object counts and used memory by InstanceType
+ size_t object_counts_[OBJECT_STATS_COUNT];
+ size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+ size_t object_sizes_[OBJECT_STATS_COUNT];
+ size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+
// Maximum GC pause.
int max_gc_pause_;
+ // Total time spent in GC.
+ int total_gc_time_ms_;
+
// Maximum size of objects alive after GC.
intptr_t max_alive_after_gc_;
@@ -2050,15 +2166,16 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
+ Mutex* relocation_mutex_;
+
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
- friend class LinearAllocationScope;
friend class Page;
friend class Isolate;
friend class MarkCompactCollector;
- friend class StaticMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
friend class MapCompact;
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2098,21 +2215,29 @@ class HeapStats {
};
-class AlwaysAllocateScope {
+class DisallowAllocationFailure {
public:
- inline AlwaysAllocateScope();
- inline ~AlwaysAllocateScope();
+ inline DisallowAllocationFailure();
+ inline ~DisallowAllocationFailure();
+
+#ifdef DEBUG
+ private:
+ bool old_state_;
+#endif
};
-class LinearAllocationScope {
+class AlwaysAllocateScope {
public:
- inline LinearAllocationScope();
- inline ~LinearAllocationScope();
+ inline AlwaysAllocateScope();
+ inline ~AlwaysAllocateScope();
+
+ private:
+ // Implicitly disable artificial allocation failures.
+ DisallowAllocationFailure disallow_allocation_failure_;
};
-#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2122,7 +2247,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
public:
inline void VisitPointers(Object** start, Object** end);
};
-#endif
// Space iterator for iterating over all spaces of the heap.
@@ -2281,7 +2405,7 @@ class KeyedLookupCache {
};
-// Cache for mapping (array, property name) into descriptor index.
+// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
@@ -2289,21 +2413,21 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- int Lookup(DescriptorArray* array, String* name) {
+ int Lookup(Map* source, String* name) {
if (!StringShape(name).IsSymbol()) return kAbsent;
- int index = Hash(array, name);
+ int index = Hash(source, name);
Key& key = keys_[index];
- if ((key.array == array) && (key.name == name)) return results_[index];
+ if ((key.source == source) && (key.name == name)) return results_[index];
return kAbsent;
}
// Update an element in the cache.
- void Update(DescriptorArray* array, String* name, int result) {
+ void Update(Map* source, String* name, int result) {
ASSERT(result != kAbsent);
if (StringShape(name).IsSymbol()) {
- int index = Hash(array, name);
+ int index = Hash(source, name);
Key& key = keys_[index];
- key.array = array;
+ key.source = source;
key.name = name;
results_[index] = result;
}
@@ -2317,24 +2441,26 @@ class DescriptorLookupCache {
private:
DescriptorLookupCache() {
for (int i = 0; i < kLength; ++i) {
- keys_[i].array = NULL;
+ keys_[i].source = NULL;
keys_[i].name = NULL;
results_[i] = kAbsent;
}
}
- static int Hash(DescriptorArray* array, String* name) {
+ static int Hash(Object* source, String* name) {
// Uses only lower 32 bits if pointers are larger.
- uint32_t array_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
+ uint32_t source_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
+ >> kPointerSizeLog2;
uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
- return (array_hash ^ name_hash) % kLength;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
+ >> kPointerSizeLog2;
+ return (source_hash ^ name_hash) % kLength;
}
static const int kLength = 64;
struct Key {
- DescriptorArray* array;
+ Map* source;
String* name;
};
@@ -2346,18 +2472,6 @@ class DescriptorLookupCache {
};
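// The new hash discards the pointer's alignment bits before folding: heap
// pointers are kPointerSize-aligned, so the low kPointerSizeLog2 bits are
// always zero and carry no entropy. A standalone sketch of the same
// computation (kLength as above; kPointerSizeLog2 = 3 assumes 64-bit
// pointers):

#include <stdint.h>

static int CacheIndex(const void* source, const void* name) {
  const int kPointerSizeLog2 = 3;
  const int kLength = 64;
  uint32_t source_hash = static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(source)) >> kPointerSizeLog2;
  uint32_t name_hash = static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(name)) >> kPointerSizeLog2;
  return static_cast<int>((source_hash ^ name_hash) % kLength);
}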
-#ifdef DEBUG
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
- private:
- bool old_state_;
-};
-#endif
-
-
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
@@ -2373,6 +2487,7 @@ class AssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
@@ -2385,6 +2500,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
+ bool active_;
#endif
};
@@ -2504,24 +2620,31 @@ class GCTracer BASE_EMBEDDED {
};
-class StringSplitCache {
+class RegExpResultsCache {
public:
- static Object* Lookup(FixedArray* cache, String* string, String* pattern);
+ enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+ // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
+ // On success, the returned result is guaranteed to be a COW-array.
+ static Object* Lookup(Heap* heap,
+ String* key_string,
+ Object* key_pattern,
+ ResultsCacheType type);
+ // Attempt to add value_array to the cache specified by type. On success,
+ // value_array is turned into a COW-array.
static void Enter(Heap* heap,
- FixedArray* cache,
- String* string,
- String* pattern,
- FixedArray* array);
+ String* key_string,
+ Object* key_pattern,
+ FixedArray* value_array,
+ ResultsCacheType type);
static void Clear(FixedArray* cache);
- static const int kStringSplitCacheSize = 0x100;
+ static const int kRegExpResultsCacheSize = 0x100;
private:
static const int kArrayEntriesPerCacheEntry = 4;
static const int kStringOffset = 0;
static const int kPatternOffset = 1;
static const int kArrayOffset = 2;
-
- static MaybeObject* WrapFixedArrayInJSArray(Object* fixed_array);
};
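// A hedged sketch of how a call site consults the renamed cache (local names
// illustrative; real callers follow the old StringSplitCache pattern):
//
//   Object* cached = RegExpResultsCache::Lookup(
//       heap, subject, pattern, RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   if (cached->IsSmi()) {
//     // Miss: compute result_elements, then publish it; Enter() turns the
//     // value array into a COW array as documented above.
//     RegExpResultsCache::Enter(heap, subject, pattern, result_elements,
//                               RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   }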
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
index 26f7f7a..c8edcff 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.cc
+++ b/src/3rdparty/v8/src/hydrogen-instructions.cc
@@ -156,6 +156,20 @@ void Range::Union(Range* other) {
}
+void Range::CombinedMax(Range* other) {
+ upper_ = Max(upper_, other->upper_);
+ lower_ = Max(lower_, other->lower_);
+ set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
+
+
+void Range::CombinedMin(Range* other) {
+ upper_ = Min(upper_, other->upper_);
+ lower_ = Min(lower_, other->lower_);
+ set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
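// Worked example for the two helpers above: with a = [0, 10] and b = [-5, 7],
// CombinedMax gives [max(0, -5), max(10, 7)] = [0, 10] and CombinedMin gives
// [min(0, -5), min(10, 7)] = [-5, 7]; these are exactly the value ranges of
// Math.max(x, y) and Math.min(x, y) for x in a and y in b (see
// HMathMinMax::InferRange below).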
+
+
void Range::Sar(int32_t value) {
int32_t bits = value & 0x1F;
lower_ = lower_ >> bits;
@@ -336,7 +350,8 @@ HUseListNode* HValue::RemoveUse(HValue* value, int index) {
// Do not reuse use list nodes in debug mode, zap them.
if (current != NULL) {
HUseListNode* temp =
- new HUseListNode(current->value(), current->index(), NULL);
+ new(block()->zone())
+ HUseListNode(current->value(), current->index(), NULL);
current->Zap();
current = temp;
}
@@ -495,8 +510,8 @@ void HValue::RegisterUse(int index, HValue* new_value) {
if (new_value != NULL) {
if (removed == NULL) {
- new_value->use_list_ =
- new HUseListNode(this, index, new_value->use_list_);
+ new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
+ this, index, new_value->use_list_);
} else {
removed->set_tail(new_value->use_list_);
new_value->use_list_ = removed;
@@ -697,7 +712,7 @@ void HCallGlobal::PrintDataTo(StringStream* stream) {
void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("o ", target()->shared()->DebugName());
+ stream->Add("%o ", target()->shared()->DebugName());
stream->Add("#%d", argument_count());
}
@@ -849,28 +864,20 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
}
-HValue* HConstant::Canonicalize() {
- return HasNoUses() ? NULL : this;
-}
-
-
-HValue* HTypeof::Canonicalize() {
- return HasNoUses() ? NULL : this;
-}
-
-
HValue* HBitwise::Canonicalize() {
if (!representation().IsInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
if (left()->IsConstant() &&
HConstant::cast(left())->HasInteger32Value() &&
- HConstant::cast(left())->Integer32Value() == nop_constant) {
+ HConstant::cast(left())->Integer32Value() == nop_constant &&
+ !right()->CheckFlag(kUint32)) {
return right();
}
if (right()->IsConstant() &&
HConstant::cast(right())->HasInteger32Value() &&
- HConstant::cast(right())->Integer32Value() == nop_constant) {
+ HConstant::cast(right())->Integer32Value() == nop_constant &&
+ !left()->CheckFlag(kUint32)) {
return left();
}
return this;
@@ -882,7 +889,9 @@ HValue* HBitNot::Canonicalize() {
if (value()->IsBitNot()) {
HValue* result = HBitNot::cast(value())->value();
ASSERT(result->representation().IsInteger32());
- return result;
+ if (!result->CheckFlag(kUint32)) {
+ return result;
+ }
}
return this;
}
@@ -945,7 +954,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
// introduced.
if (value()->representation().IsInteger32()) return value();
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_X64)
if (value()->IsDiv() && (value()->UseCount() == 1)) {
// TODO(2038): Implement this optimization for non-ARM architectures.
HDiv* hdiv = HDiv::cast(value());
@@ -969,7 +979,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
- HMathFloorOfDiv* instr = new HMathFloorOfDiv(context(),
+ HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
new_left,
new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
@@ -1043,6 +1053,13 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
+void HLoadElements::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+ stream->Add(" ");
+ typecheck()->PrintNameTo(stream);
+}
+
+
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
@@ -1076,6 +1093,11 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) {
}
+void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
+ stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
+}
+
+
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1104,6 +1126,7 @@ Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
to().IsTagged() &&
+ !value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
}
@@ -1236,6 +1259,24 @@ Range* HMod::InferRange(Zone* zone) {
}
+Range* HMathMinMax::InferRange(Zone* zone) {
+ if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
+ Range* res = a->Copy(zone);
+ if (operation_ == kMathMax) {
+ res->CombinedMax(b);
+ } else {
+ ASSERT(operation_ == kMathMin);
+ res->CombinedMin(b);
+ }
+ return res;
+ } else {
+ return HValue::InferRange(zone);
+ }
+}
+
+
void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
@@ -1256,7 +1297,7 @@ void HPhi::PrintTo(StringStream* stream) {
void HPhi::AddInput(HValue* value) {
- inputs_.Add(NULL);
+ inputs_.Add(NULL, value->block()->zone());
SetOperandAt(OperandCount() - 1, value);
// Mark phis that may have 'arguments' directly or indirectly as an operand.
if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
@@ -1303,14 +1344,33 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->RequiredInputRepresentation(it.index());
+ Representation rep = value->ObservedInputRepresentation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
+ if (FLAG_trace_representation) {
+ PrintF("%d %s is used by %d %s as %s\n",
+ this->id(),
+ this->Mnemonic(),
+ value->id(),
+ value->Mnemonic(),
+ rep.Mnemonic());
+ }
}
}
}
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+ if (FLAG_trace_representation) {
+ PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
+ this->id(),
+ this->Mnemonic(),
+ other->id(),
+ other->Mnemonic(),
+ other->non_phi_uses_[Representation::kInteger32],
+ other->non_phi_uses_[Representation::kDouble],
+ other->non_phi_uses_[Representation::kTagged]);
+ }
+
for (int i = 0; i < Representation::kNumRepresentations; i++) {
indirect_uses_[i] += other->non_phi_uses_[i];
}
@@ -1324,8 +1384,14 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
+void HPhi::ResetInteger32Uses() {
+ non_phi_uses_[Representation::kInteger32] = 0;
+ indirect_uses_[Representation::kInteger32] = 0;
+}
+
+
void HSimulate::PrintDataTo(StringStream* stream) {
- stream->Add("id=%d", ast_id());
+ stream->Add("id=%d", ast_id().ToInt());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
@@ -1354,45 +1420,82 @@ void HDeoptimize::PrintDataTo(StringStream* stream) {
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
- stream->Add("%s, id=%d", *name, function()->id());
+ stream->Add("%s, id=%d", *name, function()->id().ToInt());
+}
+
+
+static bool IsInteger32(double value) {
+ double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
+ return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
}
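// Comparing bit patterns is deliberately stricter than comparing doubles:
// it rejects -0.0 (which compares equal to 0.0 as a double but loses its
// sign bit in the int32 round trip) and NaN, so only values that survive an
// exact int32 round trip qualify.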
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
- has_double_value_(false),
- int32_value_(0),
- double_value_(0) {
+ has_double_value_(false) {
set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
- double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
- has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
- if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
+ has_int32_value_ = IsInteger32(n);
+ int32_value_ = DoubleToInt32(n);
double_value_ = n;
has_double_value_ = true;
}
}
-HConstant* HConstant::CopyToRepresentation(Representation r) const {
+HConstant::HConstant(int32_t integer_value, Representation r)
+ : has_int32_value_(true),
+ has_double_value_(true),
+ int32_value_(integer_value),
+ double_value_(FastI2D(integer_value)) {
+ set_representation(r);
+ SetFlag(kUseGVN);
+}
+
+
+HConstant::HConstant(double double_value, Representation r)
+ : has_int32_value_(IsInteger32(double_value)),
+ has_double_value_(true),
+ int32_value_(DoubleToInt32(double_value)),
+ double_value_(double_value) {
+ set_representation(r);
+ SetFlag(kUseGVN);
+}
+
+
+HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
- return new HConstant(handle_, r);
+ if (handle_.is_null()) {
+ ASSERT(has_int32_value_ || has_double_value_);
+ if (has_int32_value_) return new(zone) HConstant(int32_value_, r);
+ return new(zone) HConstant(double_value_, r);
+ }
+ return new(zone) HConstant(handle_, r);
}
-HConstant* HConstant::CopyToTruncatedInt32() const {
- if (!has_double_value_) return NULL;
- int32_t truncated = NumberToInt32(*handle_);
- return new HConstant(FACTORY->NewNumberFromInt(truncated),
- Representation::Integer32());
+HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
+ if (has_int32_value_) {
+ if (handle_.is_null()) {
+ return new(zone) HConstant(int32_value_, Representation::Integer32());
+ } else {
+ // Re-use the existing Handle if possible.
+ return new(zone) HConstant(handle_, Representation::Integer32());
+ }
+ } else if (has_double_value_) {
+ return new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32());
+ } else {
+ return NULL;
+ }
}
-bool HConstant::ToBoolean() const {
+bool HConstant::ToBoolean() {
// Converts the constant's boolean value according to
// ECMAScript section 9.2 ToBoolean conversion.
if (HasInteger32Value()) return Integer32Value() != 0;
@@ -1400,17 +1503,25 @@ bool HConstant::ToBoolean() const {
double v = DoubleValue();
return v != 0 && !isnan(v);
}
- if (handle()->IsTrue()) return true;
- if (handle()->IsFalse()) return false;
- if (handle()->IsUndefined()) return false;
- if (handle()->IsNull()) return false;
- if (handle()->IsString() &&
- String::cast(*handle())->length() == 0) return false;
+ Handle<Object> literal = handle();
+ if (literal->IsTrue()) return true;
+ if (literal->IsFalse()) return false;
+ if (literal->IsUndefined()) return false;
+ if (literal->IsNull()) return false;
+ if (literal->IsString() && String::cast(*literal)->length() == 0) {
+ return false;
+ }
return true;
}
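// For reference, ECMAScript 9.2 ToBoolean: undefined and null are false, a
// number is false iff it is 0, -0 or NaN, a string is false iff it is empty,
// and every other value is true. The numeric early returns above cover the
// number cases; the literal checks handle the rest.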
void HConstant::PrintDataTo(StringStream* stream) {
- handle()->ShortPrint(stream);
+ if (has_int32_value_) {
+ stream->Add("%d ", int32_value_);
+ } else if (has_double_value_) {
+ stream->Add("%f ", FmtElm(double_value_));
+ } else {
+ handle()->ShortPrint(stream);
+ }
}
@@ -1506,7 +1617,7 @@ Range* HShl::InferRange(Zone* zone) {
}
-Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
+Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new(zone) Range(0, 255);
@@ -1597,24 +1708,55 @@ void HLoadNamedField::PrintDataTo(StringStream* stream) {
}
+// Returns true if an instance of this map can never find a property with this
+// name in its prototype chain. This means all prototypes up to the top are
+// fast and don't have the name in them. It would be good if we could optimize
+// polymorphic loads where the property is sometimes found in the prototype
+// chain.
+static bool PrototypeChainCanNeverResolve(
+ Handle<Map> map, Handle<String> name) {
+ Isolate* isolate = map->GetIsolate();
+ Object* current = map->prototype();
+ while (current != isolate->heap()->null_value()) {
+ if (current->IsJSGlobalProxy() ||
+ current->IsGlobalObject() ||
+ !current->IsJSObject() ||
+ JSObject::cast(current)->map()->has_named_interceptor() ||
+ JSObject::cast(current)->IsAccessCheckNeeded() ||
+ !JSObject::cast(current)->HasFastProperties()) {
+ return false;
+ }
+
+ LookupResult lookup(isolate);
+ Map* map = JSObject::cast(current)->map();
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsFound()) return false;
+ if (!lookup.IsCacheable()) return false;
+ current = JSObject::cast(current)->GetPrototype();
+ }
+ return true;
+}
+
+
HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
- Handle<String> name)
- : types_(Min(types->length(), kMaxLoadPolymorphism)),
+ Handle<String> name,
+ Zone* zone)
+ : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
- int map_transitions = 0;
+ SmallMapList negative_lookups;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(map->GetIsolate());
- map->LookupInDescriptors(NULL, *name, &lookup);
+ map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case FIELD: {
@@ -1624,28 +1766,47 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
- types_.Add(types->at(i));
+ types_.Add(types->at(i), zone);
break;
}
case CONSTANT_FUNCTION:
- types_.Add(types->at(i));
+ types_.Add(types->at(i), zone);
break;
- case MAP_TRANSITION:
- // We should just ignore these since they are not relevant to a load
- // operation. This means we will deopt if we actually see this map
- // from optimized code.
- map_transitions++;
+ case CALLBACKS:
break;
- default:
+ case TRANSITION:
+ case INTERCEPTOR:
+ case NONEXISTENT:
+ case NORMAL:
+ case HANDLER:
+ UNREACHABLE();
break;
}
+ } else if (lookup.IsCacheable() &&
+ // For dicts the lookup on the map will fail, but the object may
+ // contain the property so we cannot generate a negative lookup
+ // (which would just be a map check and return undefined).
+ !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ // TODO: Do we really need this? (since version 3.13.0)
+ //!map->named_interceptor_is_fallback() &&
+ PrototypeChainCanNeverResolve(map, name)) {
+ negative_lookups.Add(types->at(i), zone);
}
}
- if (types_.length() + map_transitions == types->length() &&
- FLAG_deoptimize_uncommon_cases) {
+ bool need_generic =
+ (types->length() != negative_lookups.length() + types_.length());
+ if (!need_generic && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
+ for (int i = 0; i < negative_lookups.length(); i++) {
+ types_.Add(negative_lookups.at(i), zone);
+ }
} else {
+ // We don't have an easy way to handle both a call (to the generic stub) and
+ // a deopt in the same hydrogen instruction, so in this case we don't add
+ // the negative lookups which can deopt - just let the generic stub handle
+ // them.
SetAllSideEffects();
need_generic_ = true;
}
@@ -1685,36 +1846,47 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
+void HLoadKeyed::PrintDataTo(StringStream* stream) {
+ if (!is_external()) {
+ elements()->PrintNameTo(stream);
+ } else {
+ ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ elements()->PrintNameTo(stream);
+ stream->Add(".");
+ stream->Add(ElementsKindToString(elements_kind()));
+ }
+
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("]");
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
+ if (RequiresHoleCheck()) {
+ stream->Add(" check_hole");
+ }
}
-bool HLoadKeyedFastElement::RequiresHoleCheck() {
- if (hole_check_mode_ == OMIT_HOLE_CHECK) {
+bool HLoadKeyed::RequiresHoleCheck() const {
+ if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ return true;
+ }
+
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- if (!use->IsChange()) return true;
+ if (!use->IsChange()) {
+ return true;
+ }
}
return false;
}
-void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
- elements()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1727,25 +1899,26 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
// Recognize generic keyed loads that use property name generated
// by for-in statement as a key and rewrite them into fast property load
// by index.
- if (key()->IsLoadKeyedFastElement()) {
- HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
- if (key_load->object()->IsForInCacheArray()) {
+ if (key()->IsLoadKeyed()) {
+ HLoadKeyed* key_load = HLoadKeyed::cast(key());
+ if (key_load->elements()->IsForInCacheArray()) {
HForInCacheArray* names_cache =
- HForInCacheArray::cast(key_load->object());
+ HForInCacheArray::cast(key_load->elements());
if (names_cache->enumerable() == object()) {
HForInCacheArray* index_cache =
names_cache->index_cache();
HCheckMapValue* map_check =
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
- HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
+ HInstruction* index = new(block()->zone()) HLoadKeyed(
index_cache,
key_load->key(),
- HLoadKeyedFastElement::OMIT_HOLE_CHECK);
- HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
- object(), index);
+ key_load->key(),
+ key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
+ HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
+ object(), index);
load->InsertBefore(this);
return load;
}
@@ -1756,52 +1929,6 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
}
-void HLoadKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (elements_kind()) {
- case EXTERNAL_BYTE_ELEMENTS:
- stream->Add("byte");
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- stream->Add("u_byte");
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- stream->Add("short");
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- stream->Add("u_short");
- break;
- case EXTERNAL_INT_ELEMENTS:
- stream->Add("int");
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- stream->Add("u_int");
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- stream->Add("float");
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- stream->Add("double");
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- stream->Add("pixel");
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("]");
-}
-
-
void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -1828,17 +1955,17 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
+void HStoreKeyed::PrintDataTo(StringStream* stream) {
+ if (!is_external()) {
+ elements()->PrintNameTo(stream);
+ } else {
+ elements()->PrintNameTo(stream);
+ stream->Add(".");
+ stream->Add(ElementsKindToString(elements_kind()));
+ ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ }
-void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
- elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
@@ -1855,56 +1982,15 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void HStoreKeyedSpecializedArrayElement::PrintDataTo(
- StringStream* stream) {
- external_pointer()->PrintNameTo(stream);
- stream->Add(".");
- switch (elements_kind()) {
- case EXTERNAL_BYTE_ELEMENTS:
- stream->Add("byte");
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- stream->Add("u_byte");
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- stream->Add("short");
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- stream->Add("u_short");
- break;
- case EXTERNAL_INT_ELEMENTS:
- stream->Add("int");
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- stream->Add("u_int");
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- stream->Add("float");
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- stream->Add("double");
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- stream->Add("pixel");
- break;
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- stream->Add("[");
- key()->PrintNameTo(stream);
- stream->Add("] = ");
- value()->PrintNameTo(stream);
-}
-
-
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+ ElementsKind from_kind = original_map()->elements_kind();
+ ElementsKind to_kind = transitioned_map()->elements_kind();
+ stream->Add(" %p [%s] -> %p [%s]",
+ *original_map(),
+ ElementsAccessor::ForKind(from_kind)->name(),
+ *transitioned_map(),
+ ElementsAccessor::ForKind(to_kind)->name());
}
@@ -1915,7 +2001,7 @@ void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
}
-bool HLoadGlobalCell::RequiresHoleCheck() {
+bool HLoadGlobalCell::RequiresHoleCheck() const {
if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
@@ -1997,6 +2083,10 @@ HType HPhi::CalculateInferredType() {
HType HConstant::CalculateInferredType() {
+ if (has_int32_value_) {
+ return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
+ }
+ if (has_double_value_) return HType::HeapNumber();
return HType::TypeFromValue(handle_);
}
@@ -2144,6 +2234,13 @@ HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
+HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+ visited->Add(id());
+ SetFlag(kBailoutOnMinusZero);
+ return NULL;
+}
+
+
HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
if (range() == NULL || range()->CanBeMinusZero()) {
@@ -2175,10 +2272,10 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
-bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
- // If value was loaded from unboxed double backing store or
- // converted from an integer then we don't have to canonicalize it.
- if (value()->IsLoadKeyedFastDoubleElement() ||
+bool HStoreKeyed::NeedsCanonicalization() {
+ // If value is an integer or comes from the result of a keyed load
+ // then it will be a non-hole value: no need for canonicalization.
+ if (value()->IsLoadKeyed() ||
(value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
return false;
}
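Background for the canonicalization rule above, as a sketch: a fast-double backing store reserves one specific NaN bit pattern to represent "the hole", so a double of unknown provenance must be collapsed to the canonical NaN before being stored, or an arithmetic NaN could masquerade as a hole. The two exempt cases are exactly the values that provably cannot carry an arbitrary NaN payload. Illustrative fragment (the real store site canonicalizes in generated code, not in C++ like this):

    // Collapse every NaN to the single canonical quiet NaN so no stored
    // value can collide with the bit pattern reserved for the hole.
    double CanonicalizeForDoubleStore(double value) {
      if (isnan(value)) return OS::nan_value();
      return value;
    }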
@@ -2439,12 +2536,6 @@ void HCheckFunction::Verify() {
ASSERT(HasNoUses());
}
-
-void HCheckPrototypeMaps::Verify() {
- HInstruction::Verify();
- ASSERT(HasNoUses());
-}
-
#endif
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
index c2c51ca..7136657 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.h
+++ b/src/3rdparty/v8/src/hydrogen-instructions.h
@@ -53,6 +53,7 @@ class LChunkBuilder;
#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+ V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(ControlInstruction) \
V(Instruction) \
@@ -124,7 +125,6 @@ class LChunkBuilder;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -133,14 +133,14 @@ class LChunkBuilder;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@@ -152,6 +152,7 @@ class LChunkBuilder;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(Ror) \
V(Sar) \
V(Shl) \
V(Shr) \
@@ -161,15 +162,14 @@ class LChunkBuilder;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(Sub) \
V(ThisFunction) \
@@ -224,6 +224,16 @@ class LChunkBuilder;
virtual Opcode opcode() const { return HValue::k##type; }
+#ifdef DEBUG
+#define ASSERT_ALLOCATION_DISABLED do { \
+ OptimizingCompilerThread* thread = \
+ ISOLATE->optimizing_compiler_thread(); \
+ ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
+ } while (0)
+#else
+#define ASSERT_ALLOCATION_DISABLED do {} while (0)
+#endif
+
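A side note on the wrapper, since the macro recurs below: the do { ... } while (0) form is the standard C/C++ idiom for making a multi-statement macro act as a single statement, so it composes safely with unbraced if/else. This is general practice, not something specific to this patch:

    if (checking_enabled)
      ASSERT_ALLOCATION_DISABLED;  // one statement; the else still binds right
    else
      HandleOtherCase();           // hypothetical call, for illustration only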
class Range: public ZoneObject {
public:
Range()
@@ -276,6 +286,8 @@ class Range: public ZoneObject {
void Intersect(Range* other);
void Union(Range* other);
+ void CombinedMax(Range* other);
+ void CombinedMin(Range* other);
void AddConstant(int32_t value);
void Sar(int32_t value);
@@ -549,7 +561,14 @@ class HValue: public ZoneObject {
kIsArguments,
kTruncatingToInt32,
kIsDead,
- kLastFlag = kIsDead
+    // Instructions that are allowed to produce full-range unsigned integer
+    // values are marked with the kUint32 flag. If an arithmetic shift or a
+    // load from an EXTERNAL_UNSIGNED_INT_ELEMENTS array is not marked with
+    // this flag, it will deoptimize when its result does not fit into the
+    // signed integer range.
+ // HGraph::ComputeSafeUint32Operations is responsible for setting this
+ // flag.
+ kUint32,
+ kLastFlag = kUint32
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
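To make the new flag concrete, a small sketch of the boundary it guards (kMaxInt is V8's INT32_MAX constant; the values are illustrative):

    uint32_t loaded = 0xFFFFFFFFu;                   // e.g. from a Uint32Array
    int32_t misread = static_cast<int32_t>(loaded);  // -1: sign has flipped
    bool fits = loaded <= static_cast<uint32_t>(kMaxInt);  // false
    // An instruction without kUint32 must deoptimize here rather than
    // continue with the misread signed value.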
@@ -645,7 +664,7 @@ class HValue: public ZoneObject {
// Operands.
virtual int OperandCount() = 0;
- virtual HValue* OperandAt(int index) = 0;
+ virtual HValue* OperandAt(int index) const = 0;
void SetOperandAt(int index, HValue* value);
void DeleteAndReplaceWith(HValue* other);
@@ -720,6 +739,11 @@ class HValue: public ZoneObject {
return representation();
}
+ // Type feedback access.
+ virtual Representation ObservedInputRepresentation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
// instruction with a new one, first add the new instruction to the graph,
@@ -751,6 +775,10 @@ class HValue: public ZoneObject {
UNREACHABLE();
}
+ bool IsDead() const {
+ return HasNoUses() && !HasObservableSideEffects() && IsDeletable();
+ }
+
#ifdef DEBUG
virtual void Verify() = 0;
#endif
@@ -835,6 +863,8 @@ class HValue: public ZoneObject {
GVNFlagSet gvn_flags_;
private:
+ virtual bool IsDeletable() const { return false; }
+
DISALLOW_COPY_AND_ASSIGN(HValue);
};
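Taken together, IsDead() and the many per-instruction IsDeletable() overrides added in this patch give dead-code elimination a single uniform predicate. A minimal sketch of such a sweep (the function and loop are illustrative, not the exact pass in this commit):

    void SweepDeadInstructions(HBasicBlock* block) {
      HInstruction* instr = block->first();
      while (instr != NULL) {
        HInstruction* next = instr->next();
        // Deletable, unused, and without observable side effects.
        if (instr->IsDead()) instr->DeleteAndReplaceWith(NULL);
        instr = next;
      }
    }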
@@ -852,9 +882,14 @@ class HInstruction: public HValue {
void InsertBefore(HInstruction* next);
void InsertAfter(HInstruction* previous);
+ // The position is a write-once variable.
int position() const { return position_; }
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
- void set_position(int position) { position_ = position; }
+ void set_position(int position) {
+ ASSERT(!has_position());
+ ASSERT(position != RelocInfo::kNoPosition);
+ position_ = position;
+ }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -898,7 +933,7 @@ template<int V>
class HTemplateInstruction : public HInstruction {
public:
int OperandCount() { return V; }
- HValue* OperandAt(int i) { return inputs_[i]; }
+ HValue* OperandAt(int i) const { return inputs_[i]; }
protected:
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
@@ -950,7 +985,7 @@ class HTemplateControlInstruction: public HControlInstruction {
void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
int OperandCount() { return V; }
- HValue* OperandAt(int i) { return inputs_[i]; }
+ HValue* OperandAt(int i) const { return inputs_[i]; }
protected:
@@ -987,14 +1022,15 @@ class HSoftDeoptimize: public HTemplateInstruction<0> {
class HDeoptimize: public HControlInstruction {
public:
- explicit HDeoptimize(int environment_length) : values_(environment_length) { }
+ HDeoptimize(int environment_length, Zone* zone)
+ : values_(environment_length, zone) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) { return values_[index]; }
+ virtual HValue* OperandAt(int index) const { return values_[index]; }
virtual void PrintDataTo(StringStream* stream);
virtual int SuccessorCount() { return 0; }
@@ -1006,8 +1042,8 @@ class HDeoptimize: public HControlInstruction {
UNREACHABLE();
}
- void AddEnvironmentValue(HValue* value) {
- values_.Add(NULL);
+ void AddEnvironmentValue(HValue* value, Zone* zone) {
+ values_.Add(NULL, zone);
SetOperandAt(values_.length() - 1, value);
}
@@ -1155,7 +1191,7 @@ class HUnaryOperation: public HTemplateInstruction<1> {
return reinterpret_cast<HUnaryOperation*>(value);
}
- HValue* value() { return OperandAt(0); }
+ HValue* value() const { return OperandAt(0); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1231,8 +1267,8 @@ class HChange: public HUnaryOperation {
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
- Representation from() { return value()->representation(); }
- Representation to() { return representation(); }
+ Representation from() const { return value()->representation(); }
+ Representation to() const { return representation(); }
bool deoptimize_on_undefined() const {
return CheckFlag(kDeoptimizeOnUndefined);
}
@@ -1251,6 +1287,11 @@ class HChange: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const {
+ return !from().IsTagged() || value()->type().IsSmi();
+ }
};
@@ -1270,23 +1311,27 @@ class HClampToUint8: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
class HSimulate: public HInstruction {
public:
- HSimulate(int ast_id, int pop_count)
+ HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
- values_(2),
- assigned_indexes_(2) {}
+ values_(2, zone),
+ assigned_indexes_(2, zone),
+ zone_(zone) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
- bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) {
+ bool HasAstId() const { return !ast_id_.IsNone(); }
+ BailoutId ast_id() const { return ast_id_; }
+ void set_ast_id(BailoutId id) {
ASSERT(!HasAstId());
ast_id_ = id;
}
@@ -1307,7 +1352,7 @@ class HSimulate: public HInstruction {
AddValue(kNoIndex, value);
}
virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) { return values_[index]; }
+ virtual HValue* OperandAt(int index) const { return values_[index]; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1327,17 +1372,18 @@ class HSimulate: public HInstruction {
private:
static const int kNoIndex = -1;
void AddValue(int index, HValue* value) {
- assigned_indexes_.Add(index);
+ assigned_indexes_.Add(index, zone_);
// Resize the list of pushed values.
- values_.Add(NULL);
+ values_.Add(NULL, zone_);
// Set the operand through the base method in HValue to make sure that the
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
}
- int ast_id_;
+ BailoutId ast_id_;
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
+ Zone* zone_;
};
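The HSimulate change above is one instance of a pattern threaded through the whole patch: ZoneList construction and growth now take the owning Zone explicitly instead of fetching it from isolate-global state. A minimal sketch of the pattern (capacity numbers arbitrary):

    Zone* zone = graph()->zone();       // the arena that owns this graph
    ZoneList<HValue*> values(2, zone);  // capacity hint 2, backed by the zone
    values.Add(NULL, zone);             // growth also allocates from the zone
    // Nothing is freed per element; the whole arena goes away at once when
    // the compilation that owns the zone finishes.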
@@ -1377,20 +1423,30 @@ class HStackCheck: public HTemplateInstruction<1> {
};
+enum InliningKind {
+ NORMAL_RETURN, // Normal function/method call and return.
+ DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
+ CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
+ GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
+ SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
+};
+
+
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
CallKind call_kind,
- bool is_construct,
+ InliningKind inlining_kind,
Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
: closure_(closure),
arguments_count_(arguments_count),
+ arguments_pushed_(false),
function_(function),
call_kind_(call_kind),
- is_construct_(is_construct),
+ inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_values_(arguments_values) {
}
@@ -1399,9 +1455,11 @@ class HEnterInlined: public HTemplateInstruction<0> {
Handle<JSFunction> closure() const { return closure_; }
int arguments_count() const { return arguments_count_; }
+ bool arguments_pushed() const { return arguments_pushed_; }
+ void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
- bool is_construct() const { return is_construct_; }
+ InliningKind inlining_kind() const { return inlining_kind_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1415,9 +1473,10 @@ class HEnterInlined: public HTemplateInstruction<0> {
private:
Handle<JSFunction> closure_;
int arguments_count_;
+ bool arguments_pushed_;
FunctionLiteral* function_;
CallKind call_kind_;
- bool is_construct_;
+ InliningKind inlining_kind_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
};
@@ -1425,21 +1484,13 @@ class HEnterInlined: public HTemplateInstruction<0> {
class HLeaveInlined: public HTemplateInstruction<0> {
public:
- explicit HLeaveInlined(bool arguments_pushed)
- : arguments_pushed_(arguments_pushed) { }
+ HLeaveInlined() { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
- bool arguments_pushed() {
- return arguments_pushed_;
- }
-
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
-
- private:
- bool arguments_pushed_;
};
@@ -1461,7 +1512,7 @@ class HPushArgument: public HUnaryOperation {
class HThisFunction: public HTemplateInstruction<0> {
public:
- explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
+ HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -1470,18 +1521,13 @@ class HThisFunction: public HTemplateInstruction<0> {
return Representation::None();
}
- Handle<JSFunction> closure() const { return closure_; }
-
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) {
- HThisFunction* b = HThisFunction::cast(other);
- return *closure() == *b->closure();
- }
+ virtual bool DataEquals(HValue* other) { return true; }
private:
- Handle<JSFunction> closure_;
+ virtual bool IsDeletable() const { return true; }
};
@@ -1500,6 +1546,9 @@ class HContext: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -1518,6 +1567,9 @@ class HOuterContext: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -1573,6 +1625,7 @@ class HGlobalObject: public HUnaryOperation {
}
private:
+ virtual bool IsDeletable() const { return true; }
bool qml_global_;
};
@@ -1593,6 +1646,9 @@ class HGlobalReceiver: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -1855,7 +1911,9 @@ class HCallRuntime: public HCall<1> {
class HJSArrayLength: public HTemplateInstruction<2> {
public:
- HJSArrayLength(HValue* value, HValue* typecheck) {
+ HJSArrayLength(HValue* value, HValue* typecheck,
+ HType type = HType::Tagged()) {
+ set_type(type);
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be a 32-bit integer, but it can be
// represented as either a smi or a heap number.
@@ -1879,13 +1937,17 @@ class HJSArrayLength: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other_raw) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
+ set_type(HType::Smi());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
@@ -1899,6 +1961,32 @@ class HFixedArrayBaseLength: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
+};
+
+
+class HMapEnumLength: public HUnaryOperation {
+ public:
+ explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
+ set_type(HType::Smi());
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -1918,6 +2006,9 @@ class HElementsKind: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -1940,6 +2031,9 @@ class HBitNot: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -2022,18 +2116,27 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
private:
+ virtual bool IsDeletable() const { return true; }
+
BuiltinFunctionId op_;
};
-class HLoadElements: public HUnaryOperation {
+class HLoadElements: public HTemplateInstruction<2> {
public:
- explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
+ HLoadElements(HValue* value, HValue* typecheck) {
+ SetOperandAt(0, value);
+ SetOperandAt(1, typecheck);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnElementsPointer);
}
+ HValue* value() { return OperandAt(0); }
+ HValue* typecheck() { return OperandAt(1); }
+
+ virtual void PrintDataTo(StringStream* stream);
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2042,6 +2145,9 @@ class HLoadElements: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -2065,12 +2171,16 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
class HCheckMaps: public HTemplateInstruction<2> {
public:
- HCheckMaps(HValue* value, Handle<Map> map, HValue* typecheck = NULL) {
+ HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
+ HValue* typecheck = NULL) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@@ -2079,9 +2189,9 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
- map_set()->Add(map);
+ map_set()->Add(map, zone);
}
- HCheckMaps(HValue* value, SmallMapList* maps) {
+ HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
@@ -2089,37 +2199,31 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
for (int i = 0; i < maps->length(); i++) {
- map_set()->Add(maps->at(i));
+ map_set()->Add(maps->at(i), zone);
}
map_set()->Sort();
}
- static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map) {
- HCheckMaps* check_map = new HCheckMaps(object, map);
+ static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
+ Zone* zone) {
+ HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
SmallMapList* map_set = check_map->map_set();
- // If the map to check has the untransitioned elements, it can be hoisted
- // above TransitionElements instructions.
- if (map->has_fast_smi_only_elements()) {
- check_map->ClearGVNFlag(kDependsOnElementsKind);
- }
-
- Map* transitioned_fast_element_map =
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- map_set->Add(Handle<Map>(transitioned_fast_element_map));
- }
- Map* transitioned_double_map =
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- map_set->Add(Handle<Map>(transitioned_double_map));
- }
+  // Maps reached by legal elements-kind transitions from the initial map
+  // still pass this map check, so the CheckMaps instruction doesn't need
+  // to depend on the elements kind.
+ check_map->ClearGVNFlag(kDependsOnElementsKind);
+
+ ElementsKind kind = map->elements_kind();
+ bool packed = IsFastPackedElementsKind(kind);
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ Map* transitioned_map =
+ map->LookupElementsTransitionMap(kind);
+ if (transitioned_map) {
+ map_set->Add(Handle<Map>(transitioned_map), zone);
+ }
+  }
map_set->Sort();
-
return check_map;
}
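A worked example of the generalization walk above, assuming the initial map has packed smi elements (kind names as in this snapshot's ElementsKind enum):

    ElementsKind kind = FAST_SMI_ELEMENTS;  // starting kind, packed
    bool packed = true;
    // Iteration 1: kind becomes FAST_DOUBLE_ELEMENTS.
    // Iteration 2: kind becomes FAST_ELEMENTS, the most general packed kind.
    while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
      kind = GetNextMoreGeneralFastElementsKind(kind, packed);
      // Every kind with a recorded transition map joins map_set, so the
      // check still passes after any of these legal transitions.
    }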
@@ -2185,17 +2289,17 @@ class HCheckFunction: public HUnaryOperation {
class HCheckInstanceType: public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value) {
- return new HCheckInstanceType(value, IS_SPEC_OBJECT);
+ static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
}
- static HCheckInstanceType* NewIsJSArray(HValue* value) {
- return new HCheckInstanceType(value, IS_JS_ARRAY);
+ static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
}
- static HCheckInstanceType* NewIsString(HValue* value) {
- return new HCheckInstanceType(value, IS_STRING);
+ static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_STRING);
}
- static HCheckInstanceType* NewIsSymbol(HValue* value) {
- return new HCheckInstanceType(value, IS_SYMBOL);
+ static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_SYMBOL);
}
virtual void PrintDataTo(StringStream* stream);
@@ -2286,10 +2390,6 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
SetGVNFlag(kDependsOnMaps);
}
-#ifdef DEBUG
- virtual void Verify();
-#endif
-
Handle<JSObject> prototype() const { return prototype_; }
Handle<JSObject> holder() const { return holder_; }
@@ -2299,8 +2399,10 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
return Representation::None();
}
+ virtual void PrintDataTo(StringStream* stream);
+
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->IsAllocationAllowed());
+ ASSERT_ALLOCATION_DISABLED;
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
@@ -2344,8 +2446,8 @@ class HCheckSmi: public HUnaryOperation {
class HPhi: public HValue {
public:
- explicit HPhi(int merged_index)
- : inputs_(2),
+ HPhi(int merged_index, Zone* zone)
+ : inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
is_live_(false),
@@ -2367,7 +2469,7 @@ class HPhi: public HValue {
}
virtual HType CalculateInferredType();
virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) { return inputs_[index]; }
+ virtual HValue* OperandAt(int index) const { return inputs_[index]; }
HValue* GetRedundantReplacement();
void AddInput(HValue* value);
bool HasRealUses();
@@ -2424,11 +2526,15 @@ class HPhi: public HValue {
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConvertibleToInteger()) return false;
+ if (!OperandAt(i)->IsConvertibleToInteger()) {
+ return false;
+ }
}
return true;
}
+ void ResetInteger32Uses();
+
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -2459,26 +2565,51 @@ class HArgumentsObject: public HTemplateInstruction<0> {
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
class HConstant: public HTemplateInstruction<0> {
public:
HConstant(Handle<Object> handle, Representation r);
+ HConstant(int32_t value, Representation r);
+ HConstant(double value, Representation r);
- Handle<Object> handle() const { return handle_; }
+ Handle<Object> handle() {
+ if (handle_.is_null()) {
+ handle_ = FACTORY->NewNumber(double_value_, TENURED);
+ }
+ ASSERT(has_int32_value_ || !handle_->IsSmi());
+ return handle_;
+ }
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
bool ImmortalImmovable() const {
+ if (has_int32_value_) {
+ return false;
+ }
+ if (has_double_value_) {
+ if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
+ isnan(double_value_)) {
+ return true;
+ }
+ return false;
+ }
+
+ ASSERT(!handle_.is_null());
Heap* heap = HEAP;
+ // We should have handled minus_zero_value and nan_value in the
+ // has_double_value_ clause above.
+ ASSERT(*handle_ != heap->minus_zero_value());
+ ASSERT(*handle_ != heap->nan_value());
if (*handle_ == heap->undefined_value()) return true;
if (*handle_ == heap->null_value()) return true;
if (*handle_ == heap->true_value()) return true;
if (*handle_ == heap->false_value()) return true;
if (*handle_ == heap->the_hole_value()) return true;
- if (*handle_ == heap->minus_zero_value()) return true;
- if (*handle_ == heap->nan_value()) return true;
if (*handle_ == heap->empty_string()) return true;
return false;
}
@@ -2488,20 +2619,15 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool IsConvertibleToInteger() const {
- if (handle_->IsSmi()) return true;
- if (handle_->IsHeapNumber() &&
- (HeapNumber::cast(*handle_)->value() ==
- static_cast<double>(NumberToInt32(*handle_)))) return true;
- return false;
+ return has_int32_value_;
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
- virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
- bool IsInteger() const { return handle_->IsSmi(); }
- HConstant* CopyToRepresentation(Representation r) const;
- HConstant* CopyToTruncatedInt32() const;
+ bool IsInteger() { return handle()->IsSmi(); }
+ HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
+ HConstant* CopyToTruncatedInt32(Zone* zone) const;
bool HasInteger32Value() const { return has_int32_value_; }
int32_t Integer32Value() const {
ASSERT(HasInteger32Value());
@@ -2512,24 +2638,35 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
- bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+ bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
- if (has_int32_value_) return int32_value_;
- return DoubleToInt32(double_value_);
+ // Irrespective of whether a numeric HConstant can be safely
+ // represented as an int32, we store the (in some cases lossy)
+ // representation of the number in int32_value_.
+ return int32_value_;
}
- bool HasStringValue() const { return handle_->IsString(); }
- bool ToBoolean() const;
+ bool ToBoolean();
+
+ bool IsUint32() {
+ return HasInteger32Value() && (Integer32Value() >= 0);
+ }
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
- intptr_t hash = reinterpret_cast<intptr_t>(*handle());
- // Prevent smis from having fewer hash values when truncated to
- // the least significant bits.
- const int kShiftSize = kSmiShiftSize + kSmiTagSize;
- STATIC_ASSERT(kShiftSize != 0);
- return hash ^ (hash >> kShiftSize);
+ ASSERT_ALLOCATION_DISABLED;
+ intptr_t hash;
+
+ if (has_int32_value_) {
+ hash = static_cast<intptr_t>(int32_value_);
+ } else if (has_double_value_) {
+ hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ } else {
+ ASSERT(!handle_.is_null());
+ hash = reinterpret_cast<intptr_t>(*handle_);
+ }
+
+ return hash;
}
#ifdef DEBUG
@@ -2543,15 +2680,34 @@ class HConstant: public HTemplateInstruction<0> {
virtual bool DataEquals(HValue* other) {
HConstant* other_constant = HConstant::cast(other);
- return handle().is_identical_to(other_constant->handle());
+ if (has_int32_value_) {
+ return other_constant->has_int32_value_ &&
+ int32_value_ == other_constant->int32_value_;
+ } else if (has_double_value_) {
+ return other_constant->has_double_value_ &&
+ BitCast<int64_t>(double_value_) ==
+ BitCast<int64_t>(other_constant->double_value_);
+ } else {
+ ASSERT(!handle_.is_null());
+ return !other_constant->handle_.is_null() &&
+ *handle_ == *other_constant->handle_;
+ }
}
private:
+ virtual bool IsDeletable() const { return true; }
+
+  // If this is a numerical constant, handle_ either points to the
+ // HeapObject the constant originated from or is null. If the
+ // constant is non-numeric, handle_ always points to a valid
+ // constant HeapObject.
Handle<Object> handle_;
- // The following two values represent the int32 and the double value of the
- // given constant if there is a lossless conversion between the constant
- // and the specific representation.
+  // We store the HConstant in the most specific form we can safely use.
+  // The two flags, has_int32_value_ and has_double_value_, tell us
+  // whether int32_value_ and double_value_ hold valid, safe
+  // representations of the constant. has_int32_value_ implies
+  // has_double_value_ but not the converse.
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
int32_t int32_value_;
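To illustrate the new numeric constructors and the lazily materialized handle (a sketch; zone and representation chosen for the example):

    // A pure int32 constant carries no heap object at first.
    HConstant* c = new(zone) HConstant(42, Representation::Integer32());
    ASSERT(c->HasInteger32Value() && c->Integer32Value() == 42);
    ASSERT(c->HasDoubleValue());  // has_int32_value_ implies has_double_value_
    // Only an explicit handle() call materializes a heap number (or smi),
    // via FACTORY->NewNumber(double_value_, TENURED) as shown above.
    Handle<Object> h = c->handle();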
@@ -2578,6 +2734,7 @@ class HBinaryOperation: public HTemplateInstruction<3> {
if (IsCommutative() && left()->IsConstant()) return right();
return left();
}
+
HValue* MostConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return left();
return right();
@@ -2586,6 +2743,8 @@ class HBinaryOperation: public HTemplateInstruction<3> {
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
};
@@ -2660,6 +2819,9 @@ class HArgumentsElements: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+ private:
+ virtual bool IsDeletable() const { return true; }
+
bool from_inlined_;
};
@@ -2679,6 +2841,9 @@ class HArgumentsLength: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -2711,16 +2876,42 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
};
+enum BoundsCheckKeyMode {
+ DONT_ALLOW_SMI_KEY,
+ ALLOW_SMI_KEY
+};
+
+
class HBoundsCheck: public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length) {
+ HBoundsCheck(HValue* index, HValue* length,
+ BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
+ : key_mode_(key_mode) {
SetOperandAt(0, index);
SetOperandAt(1, length);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int arg_index) {
+ if (key_mode_ == DONT_ALLOW_SMI_KEY ||
+ !length()->representation().IsTagged()) {
+ return Representation::Integer32();
+ }
+ // If the index is tagged and isn't constant, then allow the length
+ // to be tagged, since it is usually already tagged from loading it out of
+ // the length field of a JSArray. This allows for direct comparison without
+ // untagging.
+ if (index()->representation().IsTagged() && !index()->IsConstant()) {
+ return Representation::Tagged();
+ }
+ // Also allow the length to be tagged if the index is constant, because
+ // it can be tagged to allow direct comparison.
+ if (index()->IsConstant() &&
+ index()->representation().IsInteger32() &&
+ arg_index == 1) {
+ return Representation::Tagged();
+ }
return Representation::Integer32();
}
@@ -2733,6 +2924,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+ BoundsCheckKeyMode key_mode_;
};
@@ -2743,6 +2935,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
+ observed_input_representation_[0] = Representation::Tagged();
+ observed_input_representation_[1] = Representation::None();
+ observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2762,7 +2957,21 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual HType CalculateInferredType();
+ virtual Representation ObservedInputRepresentation(int index) {
+ return observed_input_representation_[index];
+ }
+
+ void InitializeObservedInputRepresentation(Representation r) {
+ observed_input_representation_[1] = r;
+ observed_input_representation_[2] = r;
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
+
+ Representation observed_input_representation_[3];
};
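A sketch of how the graph builder can route type feedback through the new hooks (HBitwise's constructor appears later in this header; the feedback value is illustrative):

    HBitwise* op = new(zone) HBitwise(Token::BIT_AND, context, left, right);
    // Record what the ICs observed without changing the conservative
    // Tagged requirement used before representation inference runs.
    op->InitializeObservedInputRepresentation(Representation::Integer32());
    // Inference can now consult ObservedInputRepresentation(1) and (2)
    // for the two value operands instead of assuming Tagged.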
@@ -2772,8 +2981,11 @@ class HMathFloorOfDiv: public HBinaryOperation {
: HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
+ SetFlag(kCanOverflow);
}
+ virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
@@ -2782,6 +2994,9 @@ class HMathFloorOfDiv: public HBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -2814,6 +3029,9 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
return HValue::InferredRepresentation();
}
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -3099,6 +3317,9 @@ class HGetCachedArrayIndex: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -3203,7 +3424,7 @@ class HPower: public HTemplateInstruction<2> {
}
HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
+ HValue* right() const { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) {
return index == 0
@@ -3215,6 +3436,11 @@ class HPower: public HTemplateInstruction<2> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ virtual bool IsDeletable() const {
+ return !right()->representation().IsTagged();
+ }
};
@@ -3232,6 +3458,9 @@ class HRandom: public HTemplateInstruction<1> {
}
DECLARE_CONCRETE_INSTRUCTION(Random)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -3378,6 +3607,47 @@ class HDiv: public HArithmeticBinaryOperation {
};
+class HMathMinMax: public HArithmeticBinaryOperation {
+ public:
+ enum Operation { kMathMin, kMathMax };
+
+ HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
+ : HArithmeticBinaryOperation(context, left, right),
+ operation_(op) { }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
+ }
+
+ virtual Representation InferredRepresentation() {
+ if (left()->representation().IsInteger32() &&
+ right()->representation().IsInteger32()) {
+ return Representation::Integer32();
+ }
+ return Representation::Double();
+ }
+
+ virtual bool IsCommutative() const { return true; }
+
+ Operation operation() { return operation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ return other->IsMathMinMax() &&
+ HMathMinMax::cast(other)->operation_ == operation_;
+ }
+
+ virtual Range* InferRange(Zone* zone);
+
+ private:
+ Operation operation_;
+};
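Usage sketch for the new instruction (builder plumbing elided; operand names invented):

    // Math.min(a, b) can now lower to a single instruction.
    HMathMinMax* min_op =
        new(zone) HMathMinMax(context, a, b, HMathMinMax::kMathMin);
    // With two Integer32 inputs, InferredRepresentation() stays Integer32
    // and the operation never leaves integer registers; otherwise it falls
    // back to Double. Its InferRange uses the CombinedMin/CombinedMax
    // helpers added to Range earlier in this patch.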
+
+
class HBitwise: public HBitwiseBinaryOperation {
public:
HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
@@ -3472,13 +3742,32 @@ class HSar: public HBitwiseBinaryOperation {
};
+class HRor: public HBitwiseBinaryOperation {
+ public:
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
+ ChangeRepresentation(Representation::Integer32());
+ }
+
+ static HInstruction* NewHRor(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ DECLARE_CONCRETE_INSTRUCTION(Ror)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
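The factory exists so the builder can emit one rotate for the usual JavaScript idiom; roughly (a sketch — the idiom matching itself happens at the call site, and the factory may constant-fold, per the declaration above):

    // After matching (x >>> n) | (x << (32 - n)) in the source:
    HInstruction* rotate = HRor::NewHRor(zone, context, x, n);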
+
+
class HOsrEntry: public HTemplateInstruction<0> {
public:
- explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
+ explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
SetGVNFlag(kChangesOsrEntries);
}
- int ast_id() const { return ast_id_; }
+ BailoutId ast_id() const { return ast_id_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -3487,7 +3776,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
private:
- int ast_id_;
+ BailoutId ast_id_;
};
@@ -3581,12 +3870,12 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck();
+ bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT(!HEAP->allow_allocation(false));
+ ASSERT_ALLOCATION_DISABLED;
return reinterpret_cast<intptr_t>(*cell_);
}
@@ -3603,6 +3892,8 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
}
private:
+ virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+
Handle<JSGlobalPropertyCell> cell_;
PropertyDetails details_;
};
@@ -3650,7 +3941,8 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
- return !object->IsAllocateObject() || (object != new_space_dominator);
+ return (!object->IsAllocateObject() && !object->IsFastLiteral()) ||
+ (object != new_space_dominator);
}
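The rationale, sketched: when the receiver was just allocated in new space and that allocation still dominates the store, the receiver cannot be older than the value stored into it, so the generational write barrier is unnecessary. The change above merely widens the exemption from HAllocateObject to cover HFastLiteral results as well:

    // Illustrative call site; both arguments come from the store instruction.
    bool barrier_needed =
        ReceiverObjectNeedsWriteBarrier(object, new_space_dominator);
    // false only when object is an AllocateObject/FastLiteral result *and*
    // that same instruction is still the current new-space dominator.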
@@ -3763,7 +4055,7 @@ class HLoadContextSlot: public HUnaryOperation {
return mode_ == kCheckDeoptimize;
}
- bool RequiresHoleCheck() {
+ bool RequiresHoleCheck() const {
return mode_ != kNoCheck;
}
@@ -3782,6 +4074,8 @@ class HLoadContextSlot: public HUnaryOperation {
}
private:
+ virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+
int slot_index_;
Mode mode_;
};
@@ -3874,6 +4168,8 @@ class HLoadNamedField: public HUnaryOperation {
}
private:
+ virtual bool IsDeletable() const { return true; }
+
bool is_in_object_;
int offset_;
};
@@ -3884,7 +4180,8 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
- Handle<String> name);
+ Handle<String> name,
+ Zone* zone);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
@@ -3971,162 +4268,134 @@ class ArrayInstructionInterface {
virtual ~ArrayInstructionInterface() { };
};
-class HLoadKeyedFastElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+
+class HLoadKeyed
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+ HLoadKeyed(HValue* obj,
+ HValue* key,
+ HValue* dependency,
+ ElementsKind elements_kind)
+ : bit_field_(0) {
+ bit_field_ = ElementsKindField::encode(elements_kind);
- HLoadKeyedFastElement(HValue* obj,
- HValue* key,
- HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
- : hole_check_mode_(hole_check_mode),
- index_offset_(0),
- is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnArrayElements);
- SetFlag(kUseGVN);
- }
+ SetOperandAt(2, dependency);
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
+ if (!is_external()) {
+ // I can detect the case between storing double (holey and fast) and
+    // elements_kind_ distinguishes loads from a double backing store
+    // (holey or packed) from smi/object element loads.
+ IsFastDoubleElementsKind(elements_kind));
- virtual void PrintDataTo(StringStream* stream);
+ if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+ if (IsFastSmiElementsKind(elements_kind) &&
+ IsFastPackedElementsKind(elements_kind)) {
+ set_type(HType::Smi());
+ }
- bool RequiresHoleCheck();
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedFastElement()) return false;
- HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
- if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
- return false;
- return hole_check_mode_ == other_load->hole_check_mode_;
- }
-
- private:
- HoleCheckMode hole_check_mode_;
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kDependsOnArrayElements);
+ } else {
+ set_representation(Representation::Double());
+ SetGVNFlag(kDependsOnDoubleArrayElements);
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ set_representation(Representation::Double());
+ } else {
+ set_representation(Representation::Integer32());
+ }
+ SetGVNFlag(kDependsOnSpecializedArrayElements);
+ // Native code could change the specialized array.
+ SetGVNFlag(kDependsOnCalls);
+ }
-class HLoadKeyedFastDoubleElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
- public:
- HLoadKeyedFastDoubleElement(HValue* elements, HValue* key)
- : index_offset_(0), is_dehoisted_(false) {
- SetOperandAt(0, elements);
- SetOperandAt(1, key);
- set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
}
+ bool is_external() const {
+ return IsExternalArrayElementsKind(elements_kind());
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ HValue* dependency() { return OperandAt(2); }
+ uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
+ void SetIndexOffset(uint32_t index_offset) {
+ bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
+ bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
+ void SetDehoisted(bool is_dehoisted) {
+ bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
+ }
+ ElementsKind elements_kind() const {
+ return ElementsKindField::decode(bit_field_);
}
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
-
-
-class HLoadKeyedSpecializedArrayElement
- : public HTemplateInstruction<2>, public ArrayInstructionInterface {
- public:
- HLoadKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind),
- index_offset_(0),
- is_dehoisted_(false) {
- SetOperandAt(0, external_elements);
- SetOperandAt(1, key);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
+ virtual Representation RequiredInputRepresentation(int index) {
+ // kind_fast: tagged[int32] (none)
+ // kind_double: tagged[int32] (none)
+ // kind_external: external[int32] (none)
+ if (index == 0) {
+ return is_external() ? Representation::External()
+ : Representation::Tagged();
}
- SetGVNFlag(kDependsOnSpecializedArrayElements);
- // Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
- SetFlag(kUseGVN);
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
}
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32, but the base pointer
- // for the element load is a naked pointer.
- return index == 0
- ? Representation::External()
- : Representation::Integer32();
- }
-
- HValue* external_pointer() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ bool RequiresHoleCheck() const;
virtual Range* InferRange(Zone* zone);
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
protected:
virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
- HLoadKeyedSpecializedArrayElement* cast_other =
- HLoadKeyedSpecializedArrayElement::cast(other);
- return elements_kind_ == cast_other->elements_kind();
+ if (!other->IsLoadKeyed()) return false;
+ HLoadKeyed* other_load = HLoadKeyed::cast(other);
+
+ if (IsDehoisted() && index_offset() != other_load->index_offset())
+ return false;
+ return elements_kind() == other_load->elements_kind();
}
private:
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
+ virtual bool IsDeletable() const {
+ return !RequiresHoleCheck();
+ }
+
+ // Establish some checks around our packed fields
+ enum LoadKeyedBits {
+ kBitsForElementsKind = 5,
+ kBitsForIndexOffset = 26,
+ kBitsForIsDehoisted = 1,
+
+ kStartElementsKind = 0,
+ kStartIndexOffset = kStartElementsKind + kBitsForElementsKind,
+ kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
+ };
+
+ STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
+ kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
+ STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
+ class ElementsKindField:
+ public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
+ {}; // NOLINT
+ class IndexOffsetField:
+ public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
+ {}; // NOLINT
+ class IsDehoistedField:
+ public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
+ {}; // NOLINT
+ uint32_t bit_field_;
};
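A sketch of the packing declared above: three logical fields share one uint32_t through V8's BitField template, whose encode/update/decode are the standard operations (the field classes are private to HLoadKeyed, so this fragment is illustrative rather than client code):

    uint32_t bits = 0;
    bits = ElementsKindField::encode(FAST_DOUBLE_ELEMENTS);  // bits 0..4
    bits = IndexOffsetField::update(bits, 7);                // bits 5..30
    bits = IsDehoistedField::update(bits, true);             // bit 31
    ASSERT(ElementsKindField::decode(bits) == FAST_DOUBLE_ELEMENTS);
    ASSERT(IndexOffsetField::decode(bits) == 7);
    ASSERT(IsDehoistedField::decode(bits));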
@@ -4147,6 +4416,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
+ // tagged[tagged]
return Representation::Tagged();
}
@@ -4204,6 +4474,10 @@ class HStoreNamedField: public HTemplateInstruction<2> {
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
+ bool NeedsWriteBarrierForMap() {
+ return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
+ }
+
private:
Handle<String> name_;
bool is_in_object_;
@@ -4248,83 +4522,56 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
-class HStoreKeyedFastElement
+class HStoreKeyed
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind = FAST_ELEMENTS)
+ HStoreKeyed(HValue* obj, HValue* key, HValue* val,
+ ElementsKind elements_kind)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
- SetGVNFlag(kChangesArrayElements);
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- // The key is supposed to be Integer32.
- return index == 1
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- bool value_is_smi() {
- return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
- }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
- bool NeedsWriteBarrier() {
- if (value_is_smi()) {
- return false;
+ if (is_external()) {
+ SetGVNFlag(kChangesSpecializedArrayElements);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ SetGVNFlag(kChangesDoubleArrayElements);
+ SetFlag(kDeoptimizeOnUndefined);
} else {
- return StoringValueNeedsWriteBarrier(value());
+ SetGVNFlag(kChangesArrayElements);
}
}
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
-
- private:
- ElementsKind elements_kind_;
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
-
-
-class HStoreKeyedFastDoubleElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HStoreKeyedFastDoubleElement(HValue* elements,
- HValue* key,
- HValue* val)
- : index_offset_(0), is_dehoisted_(false) {
- SetOperandAt(0, elements);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- SetGVNFlag(kChangesDoubleArrayElements);
- }
-
virtual Representation RequiredInputRepresentation(int index) {
- if (index == 1) {
+ // kind_fast: tagged[int32] = tagged
+ // kind_double: tagged[int32] = double
+ // kind_external: external[int32] = (double | int32)
+ if (index == 0) {
+ return is_external() ? Representation::External()
+ : Representation::Tagged();
+ } else if (index == 1) {
return Representation::Integer32();
- } else if (index == 2) {
+ }
+
+ ASSERT_EQ(index, 2);
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
- } else {
- return Representation::Tagged();
}
+
+ return is_external() ? Representation::Integer32()
+ : Representation::Tagged();
}
+ bool is_external() const {
+ return IsExternalArrayElementsKind(elements_kind());
+ }
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
+ bool value_is_smi() const {
+ return IsFastSmiElementsKind(elements_kind_);
+ }
+ ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
@@ -4333,64 +4580,18 @@ class HStoreKeyedFastDoubleElement
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
+ if (value_is_smi()) {
+ return false;
+ } else {
+ return StoringValueNeedsWriteBarrier(value());
+ }
}
bool NeedsCanonicalization();
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
-
- private:
- uint32_t index_offset_;
- bool is_dehoisted_;
-};
-
-
-class HStoreKeyedSpecializedArrayElement
- : public HTemplateInstruction<3>, public ArrayInstructionInterface {
- public:
- HStoreKeyedSpecializedArrayElement(HValue* external_elements,
- HValue* key,
- HValue* val,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
- SetGVNFlag(kChangesSpecializedArrayElements);
- SetOperandAt(0, external_elements);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- if (index == 0) {
- return Representation::External();
- } else {
- bool float_or_double_elements =
- elements_kind() == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind() == EXTERNAL_DOUBLE_ELEMENTS;
- if (index == 2 && float_or_double_elements) {
- return Representation::Double();
- } else {
- return Representation::Integer32();
- }
- }
- }
-
- HValue* external_pointer() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
- ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- HValue* GetKey() { return key(); }
- void SetKey(HValue* key) { SetOperandAt(1, key); }
- bool IsDehoisted() { return is_dehoisted_; }
- void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
private:
ElementsKind elements_kind_;
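Construction sketch for the unified store (operand names invented): the ElementsKind argument alone now selects what the three removed classes used to hard-code:

    HStoreKeyed* store =
        new(zone) HStoreKeyed(elements, key, value, FAST_DOUBLE_ELEMENTS);
    // For this kind: is_external() is false, the value operand must be in
    // Double representation, kChangesDoubleArrayElements is set, and
    // kDeoptimizeOnUndefined makes an undefined input deoptimize instead
    // of storing a garbage bit pattern.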
@@ -4421,6 +4622,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
virtual Representation RequiredInputRepresentation(int index) {
+ // tagged[tagged] = tagged
return Representation::Tagged();
}
@@ -4443,8 +4645,14 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
SetOperandAt(0, object);
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
+ if (original_map->has_fast_double_elements()) {
+ SetGVNFlag(kChangesElementsPointer);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+ if (transitioned_map->has_fast_double_elements()) {
+ SetGVNFlag(kChangesElementsPointer);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
set_representation(Representation::Tagged());
}
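Why the flags became conditional, in sketch form: transitions among the smi/object kinds only swap the map and keep the FixedArray backing store, while any transition involving fast double elements must copy between FixedArray and FixedDoubleArray, which both moves the elements pointer and can allocate in new space:

    // smi    -> object : map change only, elements pointer unchanged
    // smi    -> double : copy into a fresh FixedDoubleArray (may allocate)
    // double -> object : copy into a fresh FixedArray       (may allocate)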
@@ -4480,6 +4688,7 @@ class HStringAdd: public HBinaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4494,6 +4703,10 @@ class HStringAdd: public HBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // private:
+ // virtual bool IsDeletable() const { return true; }
};
@@ -4528,6 +4741,10 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
virtual Range* InferRange(Zone* zone) {
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
+
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // private:
+ // virtual bool IsDeletable() const { return true; }
};
@@ -4554,6 +4771,10 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
+
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // private:
+ // virtual bool IsDeletable() const { return true; }
};
@@ -4582,6 +4803,9 @@ class HStringLength: public HUnaryOperation {
virtual Range* InferRange(Zone* zone) {
return new(zone) Range(0, String::kMaxLength);
}
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -4608,6 +4832,9 @@ class HAllocateObject: public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
private:
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // virtual bool IsDeletable() const { return true; }
+
Handle<JSFunction> constructor_;
};
@@ -4624,6 +4851,8 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
int depth() const { return depth_; }
private:
+ virtual bool IsDeletable() const { return true; }
+
int literal_index_;
int depth_;
};
@@ -4682,7 +4911,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
HValue* context() { return OperandAt(0); }
ElementsKind boilerplate_elements_kind() const {
if (!boilerplate_object_->IsJSObject()) {
- return FAST_ELEMENTS;
+ return TERMINAL_FAST_ELEMENTS_KIND;
}
return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
}
@@ -4744,10 +4973,12 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
+ Handle<FixedArray> literals,
Handle<String> pattern,
Handle<String> flags,
int literal_index)
: HMaterializedLiteral<1>(literal_index, 0),
+ literals_(literals),
pattern_(pattern),
flags_(flags) {
SetOperandAt(0, context);
@@ -4755,6 +4986,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
}
HValue* context() { return OperandAt(0); }
+ Handle<FixedArray> literals() { return literals_; }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
@@ -4766,6 +4998,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
private:
+ Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
};
@@ -4795,6 +5028,8 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
bool pretenure() const { return pretenure_; }
private:
+ virtual bool IsDeletable() const { return true; }
+
Handle<SharedFunctionInfo> shared_info_;
bool pretenure_;
};
@@ -4811,7 +5046,6 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
@@ -4819,6 +5053,9 @@ class HTypeof: public HTemplateInstruction<2> {
}
DECLARE_CONCRETE_INSTRUCTION(Typeof)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -4837,6 +5074,9 @@ class HToFastProperties: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -4851,6 +5091,9 @@ class HValueOf: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf)
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
@@ -5047,6 +5290,9 @@ class HLoadFieldByIndex : public HTemplateInstruction<2> {
}
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
+
+ private:
+ virtual bool IsDeletable() const { return true; }
};
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
index cfe80d2..043d567 100644
--- a/src/3rdparty/v8/src/hydrogen.cc
+++ b/src/3rdparty/v8/src/hydrogen.cc
@@ -55,19 +55,19 @@ namespace internal {
HBasicBlock::HBasicBlock(HGraph* graph)
: block_id_(graph->GetNextBlockID()),
graph_(graph),
- phis_(4),
+ phis_(4, graph->zone()),
first_(NULL),
last_(NULL),
end_(NULL),
loop_information_(NULL),
- predecessors_(2),
+ predecessors_(2, graph->zone()),
dominator_(NULL),
- dominated_blocks_(4),
+ dominated_blocks_(4, graph->zone()),
last_environment_(NULL),
argument_count_(-1),
first_instruction_index_(-1),
last_instruction_index_(-1),
- deleted_phis_(4),
+ deleted_phis_(4, graph->zone()),
parent_loop_header_(NULL),
is_inline_return_target_(false),
is_deoptimizing_(false),
@@ -76,7 +76,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
- loop_information_ = new(zone()) HLoopInformation(this);
+ loop_information_ = new(zone()) HLoopInformation(this, zone());
}
@@ -88,7 +88,7 @@ void HBasicBlock::DetachLoopInformation() {
void HBasicBlock::AddPhi(HPhi* phi) {
ASSERT(!IsStartBlock());
- phis_.Add(phi);
+ phis_.Add(phi, zone());
phi->SetBlock(this);
}
@@ -119,29 +119,30 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
HDeoptimize* HBasicBlock::CreateDeoptimize(
HDeoptimize::UseEnvironment has_uses) {
ASSERT(HasEnvironment());
- if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+ if (has_uses == HDeoptimize::kNoUses)
+ return new(zone()) HDeoptimize(0, zone());
HEnvironment* environment = last_environment();
- HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
+ HDeoptimize* instr = new(zone()) HDeoptimize(environment->length(), zone());
for (int i = 0; i < environment->length(); i++) {
HValue* val = environment->values()->at(i);
- instr->AddEnvironmentValue(val);
+ instr->AddEnvironmentValue(val, zone());
}
return instr;
}
-HSimulate* HBasicBlock::CreateSimulate(int ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
- ASSERT(ast_id == AstNode::kNoNumber ||
+ ASSERT(ast_id.IsNone() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count);
+ HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
@@ -165,32 +166,31 @@ void HBasicBlock::Finish(HControlInstruction* end) {
void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
- bool drop_extra = state != NULL && state->drop_extra();
- bool arguments_pushed = state != NULL && state->arguments_pushed();
+ bool drop_extra = state != NULL &&
+ state->inlining_kind() == DROP_EXTRA_ON_RETURN;
if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
+ AddInstruction(new(zone()) HLeaveInlined());
last_environment_ = last_environment()->DiscardInlined(drop_extra);
}
- AddSimulate(AstNode::kNoNumber);
+ AddSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(block);
Finish(instr);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
- HBasicBlock* target,
FunctionState* state) {
- bool drop_extra = state != NULL && state->drop_extra();
- bool arguments_pushed = state != NULL && state->arguments_pushed();
+ HBasicBlock* target = state->function_return();
+ bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
+ AddInstruction(new(zone()) HLeaveInlined());
last_environment_ = last_environment()->DiscardInlined(drop_extra);
last_environment()->Push(return_value);
- AddSimulate(AstNode::kNoNumber);
+ AddSimulate(BailoutId::None());
HGoto* instr = new(zone()) HGoto(target);
Finish(instr);
}
@@ -203,7 +203,7 @@ void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
}
-void HBasicBlock::SetJoinId(int ast_id) {
+void HBasicBlock::SetJoinId(BailoutId ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
for (int i = 0; i < length; i++) {
@@ -278,7 +278,7 @@ void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
SetInitialEnvironment(pred->last_environment()->Copy());
}
- predecessors_.Add(pred);
+ predecessors_.Add(pred, zone());
}
@@ -291,7 +291,7 @@ void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
dominated_blocks_[index]->block_id() < block->block_id()) {
++index;
}
- dominated_blocks_.InsertAt(index, block);
+ dominated_blocks_.InsertAt(index, block, zone());
}
@@ -404,7 +404,7 @@ void HBasicBlock::Verify() {
void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
- this->back_edges_.Add(block);
+ this->back_edges_.Add(block, block->zone());
AddBlock(block);
}
@@ -430,7 +430,7 @@ void HLoopInformation::AddBlock(HBasicBlock* block) {
AddBlock(block->parent_loop_header());
} else {
block->set_parent_loop_header(loop_header());
- blocks_.Add(block);
+ blocks_.Add(block, block->zone());
for (int i = 0; i < block->predecessors()->length(); ++i) {
AddBlock(block->predecessors()->at(i));
}
@@ -451,8 +451,8 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
int block_count,
HBasicBlock* dont_visit)
: visited_count_(0),
- stack_(16),
- reachable_(block_count, ZONE),
+ stack_(16, entry_block->zone()),
+ reachable_(block_count, entry_block->zone()),
dont_visit_(dont_visit) {
PushBlock(entry_block);
Analyze();
@@ -466,7 +466,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
if (block != NULL && block != dont_visit_ &&
!reachable_.Contains(block->block_id())) {
reachable_.Add(block->block_id());
- stack_.Add(block);
+ stack_.Add(block, block->zone());
visited_count_++;
}
}
@@ -526,7 +526,8 @@ void HGraph::Verify(bool do_full_verify) const {
// Check that all join blocks have predecessors that end with an
// unconditional goto and agree on their environment node id.
if (block->predecessors()->length() >= 2) {
- int id = block->predecessors()->first()->last_environment()->ast_id();
+ BailoutId id =
+ block->predecessors()->first()->last_environment()->ast_id();
for (int k = 0; k < block->predecessors()->length(); k++) {
HBasicBlock* predecessor = block->predecessors()->at(k);
ASSERT(predecessor->end()->IsGoto());
@@ -567,9 +568,9 @@ void HGraph::Verify(bool do_full_verify) const {
HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value) {
+ Handle<Object> value) {
if (!pointer->is_set()) {
- HConstant* constant = new(zone()) HConstant(Handle<Object>(value),
+ HConstant* constant = new(zone()) HConstant(value,
Representation::Tagged());
constant->InsertAfter(GetConstantUndefined());
pointer->set(constant);
@@ -578,28 +579,40 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
}
+HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
+ int32_t value) {
+ if (!pointer->is_set()) {
+ HConstant* constant =
+ new(zone()) HConstant(value, Representation::Integer32());
+ constant->InsertAfter(GetConstantUndefined());
+ pointer->set(constant);
+ }
+ return pointer->get();
+}
+
+
HConstant* HGraph::GetConstant1() {
- return GetConstant(&constant_1_, Smi::FromInt(1));
+ return GetConstantInt32(&constant_1_, 1);
}
HConstant* HGraph::GetConstantMinus1() {
- return GetConstant(&constant_minus1_, Smi::FromInt(-1));
+ return GetConstantInt32(&constant_minus1_, -1);
}
HConstant* HGraph::GetConstantTrue() {
- return GetConstant(&constant_true_, isolate()->heap()->true_value());
+ return GetConstant(&constant_true_, isolate()->factory()->true_value());
}
HConstant* HGraph::GetConstantFalse() {
- return GetConstant(&constant_false_, isolate()->heap()->false_value());
+ return GetConstant(&constant_false_, isolate()->factory()->false_value());
}
HConstant* HGraph::GetConstantHole() {
- return GetConstant(&constant_hole_, isolate()->heap()->the_hole_value());
+ return GetConstant(&constant_hole_, isolate()->factory()->the_hole_value());
}
@@ -612,8 +625,8 @@ HGraphBuilder::HGraphBuilder(CompilationInfo* info,
graph_(NULL),
current_block_(NULL),
inlined_count_(0),
- globals_(10),
- zone_(info->isolate()->zone()),
+ globals_(10, info->zone()),
+ zone_(info->zone()),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
@@ -623,7 +636,7 @@ HGraphBuilder::HGraphBuilder(CompilationInfo* info,
HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
HBasicBlock* second,
- int join_id) {
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
@@ -676,61 +689,26 @@ HGraph::HGraph(CompilationInfo* info)
: isolate_(info->isolate()),
next_block_id_(0),
entry_block_(NULL),
- blocks_(8),
- values_(16),
- phi_list_(NULL) {
+ blocks_(8, info->zone()),
+ values_(16, info->zone()),
+ phi_list_(NULL),
+ uint32_instructions_(NULL),
+ info_(info),
+ zone_(info->zone()),
+ is_recursive_(false),
+ use_optimistic_licm_(false),
+ type_change_checksum_(0) {
start_environment_ =
- new(zone()) HEnvironment(NULL, info->scope(), info->closure());
- start_environment_->set_ast_id(AstNode::kFunctionEntryId);
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
-Handle<Code> HGraph::Compile(CompilationInfo* info) {
- int values = GetMaximumValueID();
- if (values > LUnallocated::kMaxVirtualRegisters) {
- if (FLAG_trace_bailout) {
- PrintF("Not enough virtual registers for (values).\n");
- }
- return Handle<Code>::null();
- }
- LAllocator allocator(values, this);
- LChunkBuilder builder(info, this, &allocator);
- LChunk* chunk = builder.Build();
- if (chunk == NULL) return Handle<Code>::null();
-
- if (!allocator.Allocate(chunk)) {
- if (FLAG_trace_bailout) {
- PrintF("Not enough virtual registers (regalloc).\n");
- }
- return Handle<Code>::null();
- }
-
- MacroAssembler assembler(info->isolate(), NULL, 0);
- LCodeGen generator(chunk, &assembler, info);
-
- chunk->MarkEmptyBlocks();
-
- if (generator.GenerateCode()) {
- if (FLAG_trace_codegen) {
- PrintF("Crankshaft Compiler - ");
- }
- CodeGenerator::MakeCodePrologue(info);
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
- Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
- generator.FinishCode(code);
- CodeGenerator::PrintCode(code, info);
- return code;
- }
- return Handle<Code>::null();
-}
-
-
HBasicBlock* HGraph::CreateBasicBlock() {
HBasicBlock* result = new(zone()) HBasicBlock(this);
- blocks_.Add(result);
+ blocks_.Add(result, zone());
return result;
}
@@ -748,66 +726,319 @@ void HGraph::Canonicalize() {
}
}
+// Block ordering was implemented with two mutually recursive methods,
+// HGraph::Postorder and HGraph::PostorderLoopBlocks.
+// The recursion could lead to stack overflow, so the algorithm has been
+// implemented iteratively.
+// At a high level the algorithm looks like this:
+//
+// Postorder(block, loop_header) : {
+// if (block has already been visited or is of another loop) return;
+// mark block as visited;
+// if (block is a loop header) {
+// VisitLoopMembers(block, loop_header);
+// VisitSuccessorsOfLoopHeader(block);
+// } else {
+// VisitSuccessors(block)
+// }
+// put block in result list;
+// }
+//
+// VisitLoopMembers(block, outer_loop_header) {
+// foreach (block b in block loop members) {
+// VisitSuccessorsOfLoopMember(b, outer_loop_header);
+// if (b is loop header) VisitLoopMembers(b);
+// }
+// }
+//
+// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
+// foreach (block b in block successors) Postorder(b, outer_loop_header)
+// }
+//
+// VisitSuccessorsOfLoopHeader(block) {
+// foreach (block b in block successors) Postorder(b, block)
+// }
+//
+// VisitSuccessors(block, loop_header) {
+// foreach (block b in block successors) Postorder(b, loop_header)
+// }
+//
+// The ordering is started by calling Postorder(entry, NULL).
+//
+// Each instance of PostorderProcessor represents the "stack frame" of the
+// recursion, and in particular keeps the state of the loop (iteration) of
+// the "Visit..." function it represents.
+// To recycle memory we keep all the frames in a doubly linked list, but
+// this means that we cannot use constructors to initialize the frames.
+//
+class PostorderProcessor : public ZoneObject {
+ public:
+ // Back link (towards the stack bottom).
+ PostorderProcessor* parent() { return father_; }
+ // Forward link (towards the stack top).
+ PostorderProcessor* child() { return child_; }
+ HBasicBlock* block() { return block_; }
+ HLoopInformation* loop() { return loop_; }
+ HBasicBlock* loop_header() { return loop_header_; }
+
+ static PostorderProcessor* CreateEntryProcessor(Zone* zone,
+ HBasicBlock* block,
+ BitVector* visited) {
+ PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
+ return result->SetupSuccessors(zone, block, NULL, visited);
+ }
+
+ PostorderProcessor* PerformStep(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ PostorderProcessor* next =
+ PerformNonBacktrackingStep(zone, visited, order);
+ if (next != NULL) {
+ return next;
+ } else {
+ return Backtrack(zone, visited, order);
+ }
+ }
-void HGraph::OrderBlocks() {
- HPhase phase("H_Block ordering");
- BitVector visited(blocks_.length(), zone());
-
- ZoneList<HBasicBlock*> reverse_result(8);
- HBasicBlock* start = blocks_[0];
- Postorder(start, &visited, &reverse_result, NULL);
+ private:
+ explicit PostorderProcessor(PostorderProcessor* father)
+ : father_(father), child_(NULL), successor_iterator(NULL) { }
+
+ // Each enum value identifies the cycle whose state is kept by this instance.
+ enum LoopKind {
+ NONE,
+ SUCCESSORS,
+ SUCCESSORS_OF_LOOP_HEADER,
+ LOOP_MEMBERS,
+ SUCCESSORS_OF_LOOP_MEMBER
+ };
+
+ // Each "Setup..." method is like a constructor for a cycle state.
+ PostorderProcessor* SetupSuccessors(Zone* zone,
+ HBasicBlock* block,
+ HBasicBlock* loop_header,
+ BitVector* visited) {
+ if (block == NULL || visited->Contains(block->block_id()) ||
+ block->parent_loop_header() != loop_header) {
+ kind_ = NONE;
+ block_ = NULL;
+ loop_ = NULL;
+ loop_header_ = NULL;
+ return this;
+ } else {
+ block_ = block;
+ loop_ = NULL;
+ visited->Add(block->block_id());
- blocks_.Rewind(0);
- int index = 0;
- for (int i = reverse_result.length() - 1; i >= 0; --i) {
- HBasicBlock* b = reverse_result[i];
- blocks_.Add(b);
- b->set_block_id(index++);
+ if (block->IsLoopHeader()) {
+ kind_ = SUCCESSORS_OF_LOOP_HEADER;
+ loop_header_ = block;
+ InitializeSuccessors();
+ PostorderProcessor* result = Push(zone);
+ return result->SetupLoopMembers(zone, block, block->loop_information(),
+ loop_header);
+ } else {
+ ASSERT(block->IsFinished());
+ kind_ = SUCCESSORS;
+ loop_header_ = loop_header;
+ InitializeSuccessors();
+ return this;
+ }
+ }
}
-}
+ PostorderProcessor* SetupLoopMembers(Zone* zone,
+ HBasicBlock* block,
+ HLoopInformation* loop,
+ HBasicBlock* loop_header) {
+ kind_ = LOOP_MEMBERS;
+ block_ = block;
+ loop_ = loop;
+ loop_header_ = loop_header;
+ InitializeLoopMembers();
+ return this;
+ }
+
+ PostorderProcessor* SetupSuccessorsOfLoopMember(
+ HBasicBlock* block,
+ HLoopInformation* loop,
+ HBasicBlock* loop_header) {
+ kind_ = SUCCESSORS_OF_LOOP_MEMBER;
+ block_ = block;
+ loop_ = loop;
+ loop_header_ = loop_header;
+ InitializeSuccessors();
+ return this;
+ }
+
+ // This method "allocates" a new stack frame.
+ PostorderProcessor* Push(Zone* zone) {
+ if (child_ == NULL) {
+ child_ = new(zone) PostorderProcessor(this);
+ }
+ return child_;
+ }
+
+ void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
+ ASSERT(block_->end()->FirstSuccessor() == NULL ||
+ order->Contains(block_->end()->FirstSuccessor()) ||
+ block_->end()->FirstSuccessor()->IsLoopHeader());
+ ASSERT(block_->end()->SecondSuccessor() == NULL ||
+ order->Contains(block_->end()->SecondSuccessor()) ||
+ block_->end()->SecondSuccessor()->IsLoopHeader());
+ order->Add(block_, zone);
+ }
+
+ // This method is the basic building block for walking up the stack.
+ PostorderProcessor* Pop(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ switch (kind_) {
+ case SUCCESSORS:
+ case SUCCESSORS_OF_LOOP_HEADER:
+ ClosePostorder(order, zone);
+ return father_;
+ case LOOP_MEMBERS:
+ return father_;
+ case SUCCESSORS_OF_LOOP_MEMBER:
+ if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
+ // In this case we need to perform a LOOP_MEMBERS cycle so we
+ // initialize it and return this instead of father.
+ return SetupLoopMembers(zone, block(),
+ block()->loop_information(), loop_header_);
+ } else {
+ return father_;
+ }
+ case NONE:
+ return father_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
-void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- for (int i = 0; i < loop->blocks()->length(); ++i) {
- HBasicBlock* b = loop->blocks()->at(i);
- for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, loop_header);
+ // Walks up the stack.
+ PostorderProcessor* Backtrack(Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ PostorderProcessor* parent = Pop(zone, visited, order);
+ while (parent != NULL) {
+ PostorderProcessor* next =
+ parent->PerformNonBacktrackingStep(zone, visited, order);
+ if (next != NULL) {
+ return next;
+ } else {
+ parent = parent->Pop(zone, visited, order);
+ }
}
- if (b->IsLoopHeader() && b != loop->loop_header()) {
- PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
+ return NULL;
+ }
+
+ PostorderProcessor* PerformNonBacktrackingStep(
+ Zone* zone,
+ BitVector* visited,
+ ZoneList<HBasicBlock*>* order) {
+ HBasicBlock* next_block;
+ switch (kind_) {
+ case SUCCESSORS:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ loop_header_, visited);
+ }
+ break;
+ case SUCCESSORS_OF_LOOP_HEADER:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ block(), visited);
+ }
+ break;
+ case LOOP_MEMBERS:
+ next_block = AdvanceLoopMembers();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessorsOfLoopMember(next_block,
+ loop_, loop_header_);
+ }
+ break;
+ case SUCCESSORS_OF_LOOP_MEMBER:
+ next_block = AdvanceSuccessors();
+ if (next_block != NULL) {
+ PostorderProcessor* result = Push(zone);
+ return result->SetupSuccessors(zone, next_block,
+ loop_header_, visited);
+ }
+ break;
+ case NONE:
+ return NULL;
}
+ return NULL;
}
-}
+ // The following two methods implement a "foreach b in successors" cycle.
+ void InitializeSuccessors() {
+ loop_index = 0;
+ loop_length = 0;
+ successor_iterator = HSuccessorIterator(block_->end());
+ }
-void HGraph::Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header) {
- if (block == NULL || visited->Contains(block->block_id())) return;
- if (block->parent_loop_header() != loop_header) return;
- visited->Add(block->block_id());
- if (block->IsLoopHeader()) {
- PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, block);
+ HBasicBlock* AdvanceSuccessors() {
+ if (!successor_iterator.Done()) {
+ HBasicBlock* result = successor_iterator.Current();
+ successor_iterator.Advance();
+ return result;
}
- } else {
- ASSERT(block->IsFinished());
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- Postorder(it.Current(), visited, order, loop_header);
+ return NULL;
+ }
+
+ // The following two methods implement a "foreach b in loop members" cycle.
+ void InitializeLoopMembers() {
+ loop_index = 0;
+ loop_length = loop_->blocks()->length();
+ }
+
+ HBasicBlock* AdvanceLoopMembers() {
+ if (loop_index < loop_length) {
+ HBasicBlock* result = loop_->blocks()->at(loop_index);
+ loop_index++;
+ return result;
+ } else {
+ return NULL;
}
}
- ASSERT(block->end()->FirstSuccessor() == NULL ||
- order->Contains(block->end()->FirstSuccessor()) ||
- block->end()->FirstSuccessor()->IsLoopHeader());
- ASSERT(block->end()->SecondSuccessor() == NULL ||
- order->Contains(block->end()->SecondSuccessor()) ||
- block->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block);
+
+ LoopKind kind_;
+ PostorderProcessor* father_;
+ PostorderProcessor* child_;
+ HLoopInformation* loop_;
+ HBasicBlock* block_;
+ HBasicBlock* loop_header_;
+ int loop_index;
+ int loop_length;
+ HSuccessorIterator successor_iterator;
+};
+
+
+void HGraph::OrderBlocks() {
+ HPhase phase("H_Block ordering");
+ BitVector visited(blocks_.length(), zone());
+
+ ZoneList<HBasicBlock*> reverse_result(8, zone());
+ HBasicBlock* start = blocks_[0];
+ PostorderProcessor* postorder =
+ PostorderProcessor::CreateEntryProcessor(zone(), start, &visited);
+ while (postorder != NULL) {
+ postorder = postorder->PerformStep(zone(), &visited, &reverse_result);
+ }
+ blocks_.Rewind(0);
+ int index = 0;
+ for (int i = reverse_result.length() - 1; i >= 0; --i) {
+ HBasicBlock* b = reverse_result[i];
+ blocks_.Add(b, zone());
+ b->set_block_id(index++);
+ }
}
@@ -849,9 +1080,9 @@ void HGraph::EliminateRedundantPhis() {
// Worklist of phis that can potentially be eliminated. Initialized with
// all phi nodes. When elimination of a phi node modifies another phi node
// the modified phi node is added to the worklist.
- ZoneList<HPhi*> worklist(blocks_.length());
+ ZoneList<HPhi*> worklist(blocks_.length(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
- worklist.AddAll(*blocks_[i]->phis());
+ worklist.AddAll(*blocks_[i]->phis(), zone());
}
while (!worklist.is_empty()) {
@@ -869,7 +1100,7 @@ void HGraph::EliminateRedundantPhis() {
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
value->SetOperandAt(it.index(), replacement);
- if (value->IsPhi()) worklist.Add(HPhi::cast(value));
+ if (value->IsPhi()) worklist.Add(HPhi::cast(value), zone());
}
block->RemovePhi(phi);
}
@@ -881,18 +1112,18 @@ void HGraph::EliminateUnreachablePhis() {
HPhase phase("H_Unreachable phi elimination", this);
// Initialize worklist.
- ZoneList<HPhi*> phi_list(blocks_.length());
- ZoneList<HPhi*> worklist(blocks_.length());
+ ZoneList<HPhi*> phi_list(blocks_.length(), zone());
+ ZoneList<HPhi*> worklist(blocks_.length(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list.Add(phi);
+ phi_list.Add(phi, zone());
// We can't eliminate phis in the receiver position in the environment
// because in case of throwing an error we need this value to
// construct a stack trace.
if (phi->HasRealUses() || phi->IsReceiver()) {
phi->set_is_live(true);
- worklist.Add(phi);
+ worklist.Add(phi, zone());
}
}
}
@@ -904,7 +1135,7 @@ void HGraph::EliminateUnreachablePhis() {
HValue* operand = phi->OperandAt(i);
if (operand->IsPhi() && !HPhi::cast(operand)->is_live()) {
HPhi::cast(operand)->set_is_live(true);
- worklist.Add(HPhi::cast(operand));
+ worklist.Add(HPhi::cast(operand), zone());
}
}
}
@@ -951,11 +1182,11 @@ bool HGraph::CheckConstPhiUses() {
void HGraph::CollectPhis() {
int block_count = blocks_.length();
- phi_list_ = new ZoneList<HPhi*>(block_count);
+ phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi);
+ phi_list_->Add(phi, zone());
}
}
}
@@ -976,7 +1207,7 @@ void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
HValue* use = it.value();
if (!in_worklist.Contains(use->id())) {
in_worklist.Add(use->id());
- worklist->Add(use);
+ worklist->Add(use, zone());
}
}
}
@@ -987,7 +1218,7 @@ void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
class HRangeAnalysis BASE_EMBEDDED {
public:
explicit HRangeAnalysis(HGraph* graph) :
- graph_(graph), zone_(graph->isolate()->zone()), changed_ranges_(16) { }
+ graph_(graph), zone_(graph->zone()), changed_ranges_(16, zone_) { }
void Analyze();
@@ -1132,7 +1363,7 @@ void HRangeAnalysis::RollBackTo(int index) {
void HRangeAnalysis::AddRange(HValue* value, Range* range) {
Range* original_range = value->range();
value->AddNewRange(range, zone_);
- changed_ranges_.Add(value);
+ changed_ranges_.Add(value, zone_);
Range* new_range = value->range();
TraceRange("Updated range of %d set to [%d,%d]\n",
value->id(),
@@ -1260,18 +1491,18 @@ HValue* HValueMap::Lookup(HValue* value) const {
}
-void HValueMap::Resize(int new_size) {
+void HValueMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
// Make sure we have at least one free element.
if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
+ ResizeLists(lists_size_ << 1, zone);
}
HValueMapListElement* new_array =
- ZONE->NewArray<HValueMapListElement>(new_size);
+ zone->NewArray<HValueMapListElement>(new_size);
memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_array = array_;
@@ -1289,14 +1520,14 @@ void HValueMap::Resize(int new_size) {
if (old_array[i].value != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value);
+ Insert(lists_[current].value, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
// Rehash the directly stored value.
- Insert(old_array[i].value);
+ Insert(old_array[i].value, zone);
}
}
}
@@ -1305,11 +1536,11 @@ void HValueMap::Resize(int new_size) {
}
-void HValueMap::ResizeLists(int new_size) {
+void HValueMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
HValueMapListElement* new_lists =
- ZONE->NewArray<HValueMapListElement>(new_size);
+ zone->NewArray<HValueMapListElement>(new_size);
memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_lists = lists_;
@@ -1328,10 +1559,10 @@ void HValueMap::ResizeLists(int new_size) {
}
-void HValueMap::Insert(HValue* value) {
+void HValueMap::Insert(HValue* value, Zone* zone) {
ASSERT(value != NULL);
// Resizing when half of the hashtable is filled up.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1);
+ if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
@@ -1340,7 +1571,7 @@ void HValueMap::Insert(HValue* value) {
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1);
+ ResizeLists(lists_size_ << 1, zone);
}
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
@@ -1359,10 +1590,17 @@ HSideEffectMap::HSideEffectMap() : count_(0) {
HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
- memcpy(data_, other->data_, kNumberOfTrackedSideEffects * kPointerSize);
+ *this = *other; // Calls operator=.
}
+HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
+ if (this != &other) {
+ memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ }
+ return *this;
+}
+
void HSideEffectMap::Kill(GVNFlagSet flags) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
@@ -1473,15 +1711,17 @@ class HGlobalValueNumberer BASE_EMBEDDED {
: graph_(graph),
info_(info),
removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length()),
- loop_side_effects_(graph->blocks()->length()),
+ block_side_effects_(graph->blocks()->length(), graph->zone()),
+ loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
- ASSERT(info->isolate()->heap()->allow_allocation(false));
- block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
- }
- ~HGlobalValueNumberer() {
- ASSERT(!info_->isolate()->heap()->allow_allocation(true));
+#ifdef DEBUG
+ ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
+ !info->isolate()->heap()->IsAllocationAllowed());
+#endif
+ block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
+ graph_->zone());
}
// Returns true if values with side effects are removed.
@@ -1491,9 +1731,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
- void AnalyzeBlock(HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators);
+ void AnalyzeGraph();
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
@@ -1506,7 +1744,7 @@ class HGlobalValueNumberer BASE_EMBEDDED {
HGraph* graph() { return graph_; }
CompilationInfo* info() { return info_; }
- Zone* zone() { return graph_->zone(); }
+ Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
CompilationInfo* info_;
@@ -1530,9 +1768,7 @@ bool HGlobalValueNumberer::Analyze() {
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
- HValueMap* map = new(zone()) HValueMap();
- HSideEffectMap side_effect_dominators;
- AnalyzeBlock(graph_->entry_block(), map, &side_effect_dominators);
+ AnalyzeGraph();
return removed_side_effects_;
}
@@ -1664,6 +1900,8 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+ TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
+ graph_->use_optimistic_licm() ? "yes" : "no");
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
@@ -1707,51 +1945,8 @@ void HGlobalValueNumberer::ProcessLoopBlock(
*GetGVNFlagsString(instr->gvn_flags()),
*GetGVNFlagsString(loop_kills));
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
- if (instr->IsTransitionElementsKind()) {
- // It's possible to hoist transitions out of a loop as long as the
- // hoisting wouldn't move the transition past a DependsOn of one of it's
- // changes or any instructions that might change an objects map or
- // elements contents.
- GVNFlagSet changes = instr->ChangesFlags();
- GVNFlagSet hoist_depends_blockers =
- HValue::ConvertChangesToDependsFlags(changes);
- // In addition to not hoisting transitions above other instructions that
- // change dependencies that the transition changes, it must not be
- // hoisted above map changes and stores to an elements backing store
- // that the transition might change.
- GVNFlagSet hoist_change_blockers = changes;
- hoist_change_blockers.Add(kChangesMaps);
- HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
- if (trans->original_map()->has_fast_double_elements()) {
- hoist_change_blockers.Add(kChangesDoubleArrayElements);
- }
- if (trans->transitioned_map()->has_fast_double_elements()) {
- hoist_change_blockers.Add(kChangesArrayElements);
- }
- if (FLAG_trace_gvn) {
- GVNFlagSet hoist_blockers = hoist_depends_blockers;
- hoist_blockers.Add(hoist_change_blockers);
- GVNFlagSet first_time = *first_time_changes;
- first_time.Add(*first_time_depends);
- TRACE_GVN_4("Checking dependencies on HTransitionElementsKind "
- "%d (%s) hoist blockers: %s; "
- "first-time accumulated: %s\n",
- instr->id(),
- instr->Mnemonic(),
- *GetGVNFlagsString(hoist_blockers),
- *GetGVNFlagsString(first_time));
- }
- // It's possible to hoist transition from the current loop loop only if
- // they dominate all of the successor blocks in the same loop and there
- // are not any instructions that have Changes/DependsOn that intervene
- // between it and the beginning of the loop header.
- bool in_nested_loop = block != loop_header &&
- ((block->parent_loop_header() != loop_header) ||
- block->IsLoopHeader());
- can_hoist = !in_nested_loop &&
- block->IsLoopSuccessorDominator() &&
- !first_time_depends->ContainsAnyOf(hoist_depends_blockers) &&
- !first_time_changes->ContainsAnyOf(hoist_change_blockers);
+ if (can_hoist && !graph()->use_optimistic_licm()) {
+ can_hoist = block->IsLoopSuccessorDominator();
}
if (can_hoist) {
@@ -1794,7 +1989,7 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
+ return info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count;
}
@@ -1826,89 +2021,220 @@ GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
}
-void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block,
- HValueMap* map,
- HSideEffectMap* dominators) {
- TRACE_GVN_2("Analyzing block B%d%s\n",
- block->block_id(),
- block->IsLoopHeader() ? " (loop header)" : "");
+// Each instance of this class is like a "stack frame" for the recursive
+// traversal of the dominator tree done during GVN (the stack is handled
+// as a doubly linked list).
+// We reuse frames when possible, so the list length is limited by the depth
+// of the dominator tree, but this forces us to initialize each frame by
+// calling an explicit "Initialize" method instead of using a constructor.
+class GvnBasicBlockState: public ZoneObject {
+ public:
+ static GvnBasicBlockState* CreateEntry(Zone* zone,
+ HBasicBlock* entry_block,
+ HValueMap* entry_map) {
+ return new(zone)
+ GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
+ }
+
+ HBasicBlock* block() { return block_; }
+ HValueMap* map() { return map_; }
+ HSideEffectMap* dominators() { return &dominators_; }
+
+ GvnBasicBlockState* next_in_dominator_tree_traversal(
+ Zone* zone,
+ HBasicBlock** dominator) {
+ // This assignment needs to happen before calling next_dominated() because
+ // that call can reuse "this" if we are at the last dominated block.
+ *dominator = block();
+ GvnBasicBlockState* result = next_dominated(zone);
+ if (result == NULL) {
+ GvnBasicBlockState* dominator_state = pop();
+ if (dominator_state != NULL) {
+ // This branch is guaranteed not to return NULL because pop() never
+ // returns a state where "is_done() == true".
+ *dominator = dominator_state->block();
+ result = dominator_state->next_dominated(zone);
+ } else {
+ // Unnecessary (we are returning NULL) but done for cleanliness.
+ *dominator = NULL;
+ }
+ }
+ return result;
+ }
- // If this is a loop header kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
+ private:
+ void Initialize(HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ bool copy_map,
+ Zone* zone) {
+ block_ = block;
+ map_ = copy_map ? map->Copy(zone) : map;
+ dominated_index_ = -1;
+ length_ = block->dominated_blocks()->length();
+ if (dominators != NULL) {
+ dominators_ = *dominators;
+ }
+ }
+ bool is_done() { return dominated_index_ >= length_; }
+
+ GvnBasicBlockState(GvnBasicBlockState* previous,
+ HBasicBlock* block,
+ HValueMap* map,
+ HSideEffectMap* dominators,
+ Zone* zone)
+ : previous_(previous), next_(NULL) {
+ Initialize(block, map, dominators, true, zone);
+ }
+
+ GvnBasicBlockState* next_dominated(Zone* zone) {
+ dominated_index_++;
+ if (dominated_index_ == length_ - 1) {
+ // No need to copy the map for the last child in the dominator tree.
+ Initialize(block_->dominated_blocks()->at(dominated_index_),
+ map(),
+ dominators(),
+ false,
+ zone);
+ return this;
+ } else if (dominated_index_ < length_) {
+ return push(zone,
+ block_->dominated_blocks()->at(dominated_index_),
+ dominators());
+ } else {
+ return NULL;
+ }
}
- // Go through all instructions of the current block.
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
- // Clear all instructions in the map that are affected by side effects.
- // Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- *GetGVNFlagsString(flags));
+ GvnBasicBlockState* push(Zone* zone,
+ HBasicBlock* block,
+ HSideEffectMap* dominators) {
+ if (next_ == NULL) {
+ next_ =
+ new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
+ } else {
+ next_->Initialize(block, map(), dominators, true, zone);
}
- if (instr->CheckFlag(HValue::kUseGVN)) {
- ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
- if (other != NULL) {
- ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- instr->DeleteAndReplaceWith(other);
- } else {
- map->Add(instr);
- }
+ return next_;
+ }
+ GvnBasicBlockState* pop() {
+ GvnBasicBlockState* result = previous_;
+ while (result != NULL && result->is_done()) {
+ TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
+ block()->block_id(),
+ previous_->block()->block_id())
+ result = result->previous_;
}
- if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
+ return result;
+ }
+
+ GvnBasicBlockState* previous_;
+ GvnBasicBlockState* next_;
+ HBasicBlock* block_;
+ HValueMap* map_;
+ HSideEffectMap dominators_;
+ int dominated_index_;
+ int length_;
+};
+
+// This is a recursive traversal of the dominator tree, but it has been
+// turned into a loop to avoid stack overflows.
+// The logical "stack frames" of the recursion are kept in a list of
+// GvnBasicBlockState instances.
+void HGlobalValueNumberer::AnalyzeGraph() {
+ HBasicBlock* entry_block = graph_->entry_block();
+ HValueMap* entry_map = new(zone()) HValueMap(zone());
+ GvnBasicBlockState* current =
+ GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
+
+ while (current != NULL) {
+ HBasicBlock* block = current->block();
+ HValueMap* map = current->map();
+ HSideEffectMap* dominators = current->dominators();
+
+ TRACE_GVN_2("Analyzing block B%d%s\n",
+ block->block_id(),
+ block->IsLoopHeader() ? " (loop header)" : "");
+
+ // If this is a loop header kill everything killed by the loop.
+ if (block->IsLoopHeader()) {
+ map->Kill(loop_side_effects_[block->block_id()]);
+ }
+
+ // Go through all instructions of the current block.
+ HInstruction* instr = block->first();
+ while (instr != NULL) {
+ HInstruction* next = instr->next();
+ GVNFlagSet flags = instr->ChangesFlags();
+ if (!flags.IsEmpty()) {
+ // Clear all instructions in the map that are affected by side effects.
+ // Store instruction as the dominating one for tracked side effects.
+ map->Kill(flags);
+ dominators->Store(flags, instr);
+ TRACE_GVN_2("Instruction %d %s\n", instr->id(),
+ *GetGVNFlagsString(flags));
+ }
+ if (instr->CheckFlag(HValue::kUseGVN)) {
+ ASSERT(!instr->HasObservableSideEffects());
+ HValue* other = map->Lookup(instr);
+ if (other != NULL) {
+ ASSERT(instr->Equals(other) && other->Equals(instr));
+ TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- instr->SetSideEffectDominator(changes_flag, other);
+ if (instr->HasSideEffects()) removed_side_effects_ = true;
+ instr->DeleteAndReplaceWith(other);
+ } else {
+ map->Add(instr, zone());
}
}
+ if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+ for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+ HValue* other = dominators->at(i);
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+ (other != NULL)) {
+ TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+ i,
+ instr->id(),
+ instr->Mnemonic(),
+ other->id(),
+ other->Mnemonic());
+ instr->SetSideEffectDominator(changes_flag, other);
+ }
+ }
+ }
+ instr = next;
+ }
+
+ HBasicBlock* dominator_block;
+ GvnBasicBlockState* next =
+ current->next_in_dominator_tree_traversal(zone(), &dominator_block);
+
+ if (next != NULL) {
+ HBasicBlock* dominated = next->block();
+ HValueMap* successor_map = next->map();
+ HSideEffectMap* successor_dominators = next->dominators();
+
+ // Kill everything killed on any path between this block and the
+ // dominated block. We don't have to traverse these paths if the
+ // value map and the dominators list are already empty. If the range
+ // of block ids (block_id, dominated_id) is empty, there are no such
+ // paths.
+ if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
+ dominator_block->block_id() + 1 < dominated->block_id()) {
+ visited_on_paths_.Clear();
+ GVNFlagSet side_effects_on_all_paths =
+ CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
+ dominated);
+ successor_map->Kill(side_effects_on_all_paths);
+ successor_dominators->Kill(side_effects_on_all_paths);
+ }
}
- instr = next;
- }
-
- // Recursively continue analysis for all immediately dominated blocks.
- int length = block->dominated_blocks()->length();
- for (int i = 0; i < length; ++i) {
- HBasicBlock* dominated = block->dominated_blocks()->at(i);
- // No need to copy the map for the last child in the dominator tree.
- HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
- HSideEffectMap successor_dominators(dominators);
-
- // Kill everything killed on any path between this block and the
- // dominated block. We don't have to traverse these paths if the
- // value map and the dominators list is already empty. If the range
- // of block ids (block_id, dominated_id) is empty there are no such
- // paths.
- if ((!successor_map->IsEmpty() || !successor_dominators.IsEmpty()) &&
- block->block_id() + 1 < dominated->block_id()) {
- visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
- CollectSideEffectsOnPathsToDominatedBlock(block, dominated);
- successor_map->Kill(side_effects_on_all_paths);
- successor_dominators.Kill(side_effects_on_all_paths);
- }
- AnalyzeBlock(dominated, successor_map, &successor_dominators);
+ current = next;
}
}
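
A minimal sketch, not part of the patch: the frame-reuse rule of GvnBasicBlockState on a toy dominator tree. DomNode, Frame, ValueMap (a std::map standing in for HValueMap) and AnalyzeTree are illustrative names, and the maps are deliberately never freed, mirroring zone-style lifetime.

#include <cstddef>
#include <map>
#include <vector>

struct DomNode {
  int id;
  std::vector<DomNode*> children;  // immediately dominated nodes
};

typedef std::map<int, int> ValueMap;  // toy stand-in for HValueMap

struct Frame {
  DomNode* node;
  ValueMap* map;
  size_t next_child;
};

void AnalyzeTree(DomNode* root) {
  std::vector<Frame> stack;
  Frame entry = { root, new ValueMap(), 0 };
  stack.push_back(entry);  // value-number root's instructions here
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.next_child < top.node->children.size()) {
      DomNode* child = top.node->children[top.next_child++];
      bool last = (top.next_child == top.node->children.size());
      // Copy the map for every child except the last one; the last child
      // takes over the parent's map because the parent is done with it.
      ValueMap* child_map = last ? top.map : new ValueMap(*top.map);
      Frame next = { child, child_map, 0 };
      stack.push_back(next);  // value-number child's instructions here
    } else {
      stack.pop_back();  // maps are not freed: zone-style lifetime
    }
  }
}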
@@ -1917,7 +2243,7 @@ class HInferRepresentation BASE_EMBEDDED {
public:
explicit HInferRepresentation(HGraph* graph)
: graph_(graph),
- worklist_(8),
+ worklist_(8, graph->zone()),
in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
void Analyze();
@@ -1929,7 +2255,7 @@ class HInferRepresentation BASE_EMBEDDED {
void AddDependantsToWorklist(HValue* current);
void InferBasedOnUses(HValue* current);
- Zone* zone() { return graph_->zone(); }
+ Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
ZoneList<HValue*> worklist_;
@@ -1941,7 +2267,7 @@ void HInferRepresentation::AddToWorklist(HValue* current) {
if (current->representation().IsSpecialization()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current);
+ worklist_.Add(current, zone());
in_worklist_.Add(current->id());
}
@@ -2008,8 +2334,16 @@ Representation HInferRepresentation::TryChange(HValue* value) {
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- Representation rep = use->RequiredInputRepresentation(it.index());
+ Representation rep = use->ObservedInputRepresentation(it.index());
if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("%d %s is used by %d %s as %s\n",
+ value->id(),
+ value->Mnemonic(),
+ use->id(),
+ use->Mnemonic(),
+ rep.Mnemonic());
+ }
if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
use_count[rep.kind()] += use->LoopWeight();
}
@@ -2044,12 +2378,12 @@ void HInferRepresentation::Analyze() {
// bit-vector of length <number of phis>.
const ZoneList<HPhi*>* phi_list = graph_->phi_list();
int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count);
+ ZoneList<BitVector*> connected_phis(phi_count, graph_->zone());
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->InitRealUses(i);
BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
connected_set->Add(i);
- connected_phis.Add(connected_set);
+ connected_phis.Add(connected_set, zone());
}
// (2) Do a fixed point iteration to find the set of connected phis. A
@@ -2073,21 +2407,34 @@ void HInferRepresentation::Analyze() {
}
}
- // (3) Use the phi reachability information from step 2 to
- // (a) sum up the non-phi use counts of all connected phis.
- // (b) push information about values which can't be converted to integer
- // without deoptimization through the phi use-def chains, avoiding
- // unnecessary deoptimizations later.
+ // (3a) Use the phi reachability information from step 2 to
+ // push information about values which can't be converted to integer
+ // without deoptimization through the phi use-def chains, avoiding
+ // unnecessary deoptimizations later.
for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
bool cti = phi->AllOperandsConvertibleToInteger();
+ if (cti) continue;
+
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ HPhi* phi = phi_list->at(it.Current());
+ phi->set_is_convertible_to_integer(false);
+ phi->ResetInteger32Uses();
+ }
+ }
+
+ // (3b) Use the phi reachability information from step 2 to
+ // sum up the non-phi use counts of all connected phis.
+ for (int i = 0; i < phi_count; ++i) {
+ HPhi* phi = phi_list->at(i);
for (BitVector::Iterator it(connected_phis.at(i));
!it.Done();
it.Advance()) {
int index = it.Current();
- HPhi* it_use = phi_list->at(it.Current());
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice!
- if (!cti) it_use->set_is_convertible_to_integer(false);
+ HPhi* it_use = phi_list->at(index);
+ if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
}
}
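
A minimal sketch, not part of the patch: the step (2) fixed point on a toy representation, where phi_inputs[i] lists which phis feed phi i and connected[i] is phi i's reachability bit set. The names are illustrative, with vector<bool> standing in for BitVector.

#include <cstddef>
#include <vector>

// Fold each operand phi's connected set into its user's set until no set
// changes; termination is guaranteed because the sets only ever grow.
void ConnectPhis(const std::vector<std::vector<int> >& phi_inputs,
                 std::vector<std::vector<bool> >* connected) {
  const size_t n = phi_inputs.size();
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t i = 0; i < n; ++i) {
      for (size_t k = 0; k < phi_inputs[i].size(); ++k) {
        const int j = phi_inputs[i][k];  // phi j is an operand of phi i
        for (size_t b = 0; b < n; ++b) {
          if ((*connected)[j][b] && !(*connected)[i][b]) {
            (*connected)[i][b] = true;
            changed = true;
          }
        }
      }
    }
  }
}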
@@ -2145,9 +2492,9 @@ void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
i = last_back_edge->block_id();
// Update phis of the loop header now after the whole loop body is
// guaranteed to be processed.
- ZoneList<HValue*> worklist(block->phis()->length());
+ ZoneList<HValue*> worklist(block->phis()->length(), zone());
for (int j = 0; j < block->phis()->length(); ++j) {
- worklist.Add(block->phis()->at(j));
+ worklist.Add(block->phis()->at(j), zone());
}
InferTypes(&worklist);
}
@@ -2170,8 +2517,8 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
break;
}
- // For multiplication and division, we must propagate to the left and
- // the right side.
+ // For multiplication, division, and Math.min/max(), we must propagate
+ // to the left and the right side.
if (current->IsMul()) {
HMul* mul = HMul::cast(current);
mul->EnsureAndPropagateNotMinusZero(visited);
@@ -2182,6 +2529,11 @@ void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
div->EnsureAndPropagateNotMinusZero(visited);
PropagateMinusZeroChecks(div->left(), visited);
PropagateMinusZeroChecks(div->right(), visited);
+ } else if (current->IsMathMinMax()) {
+ HMathMinMax* minmax = HMathMinMax::cast(current);
+ visited->Add(minmax->id());
+ PropagateMinusZeroChecks(minmax->left(), visited);
+ PropagateMinusZeroChecks(minmax->right(), visited);
}
current = current->EnsureAndPropagateNotMinusZero(visited);
@@ -2214,8 +2566,8 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
new_value = is_truncating
- ? constant->CopyToTruncatedInt32()
- : constant->CopyToRepresentation(to);
+ ? constant->CopyToTruncatedInt32(zone())
+ : constant->CopyToRepresentation(to, zone());
}
if (new_value == NULL) {
@@ -2331,6 +2683,229 @@ void HGraph::MarkDeoptimizeOnUndefined() {
}
+// Discover instructions that can be marked with the kUint32 flag, allowing
+// them to produce full-range uint32 values.
+class Uint32Analysis BASE_EMBEDDED {
+ public:
+ explicit Uint32Analysis(Zone* zone) : zone_(zone), phis_(4, zone) { }
+
+ void Analyze(HInstruction* current);
+
+ void UnmarkUnsafePhis();
+
+ private:
+ bool IsSafeUint32Use(HValue* val, HValue* use);
+ bool Uint32UsesAreSafe(HValue* uint32val);
+ bool CheckPhiOperands(HPhi* phi);
+ void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist);
+
+ Zone* zone_;
+ ZoneList<HPhi*> phis_;
+};
+
+
+bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
+ // Operations that operate on bits are safe.
+ if (use->IsBitwise() ||
+ use->IsShl() ||
+ use->IsSar() ||
+ use->IsShr() ||
+ use->IsBitNot()) {
+ return true;
+ } else if (use->IsChange() || use->IsSimulate()) {
+ // Conversions and deoptimization have special support for uint32.
+ return true;
+ } else if (use->IsStoreKeyed()) {
+ HStoreKeyed* store = HStoreKeyed::cast(use);
+ if (store->is_external()) {
+ // Storing a value into an external integer array is a bit level
+ // operation.
+ if (store->value() == val) {
+ // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+
+// Iterate over all uses and verify that they are uint32 safe: either they
+// don't distinguish between int32 and uint32 due to their bitwise nature or
+// they have special support for uint32 values.
+// Encountered phis are optimistically treated as safe uint32 uses,
+// marked with the kUint32 flag and collected in the phis_ list. A separate
+// pass will be performed later by UnmarkUnsafePhis to clear kUint32 from
+// phis that are not actually uint32-safe (it requires fixed-point iteration).
+bool Uint32Analysis::Uint32UsesAreSafe(HValue* uint32val) {
+ bool collect_phi_uses = false;
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ if (use->IsPhi()) {
+ if (!use->CheckFlag(HInstruction::kUint32)) {
+ // There is a phi use of this value from a phi that is not yet
+ // collected in the phis_ array. A separate pass is required.
+ collect_phi_uses = true;
+ }
+
+ // Optimistically treat phis as uint32 safe.
+ continue;
+ }
+
+ if (!IsSafeUint32Use(uint32val, use)) {
+ return false;
+ }
+ }
+
+ if (collect_phi_uses) {
+ for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+
+ // There is a phi use of this value from a phi that is not yet
+ // collected in the phis_ array. A separate pass is required.
+ if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
+ use->SetFlag(HInstruction::kUint32);
+ phis_.Add(HPhi::cast(use), zone_);
+ }
+ }
+ }
+
+ return true;
+}
+
+
+// Analyze instruction and mark it with kUint32 if all its uses are uint32
+// safe.
+void Uint32Analysis::Analyze(HInstruction* current) {
+ if (Uint32UsesAreSafe(current)) current->SetFlag(HInstruction::kUint32);
+}
+
+
+// Check if all operands of the given phi are marked with the kUint32 flag.
+bool Uint32Analysis::CheckPhiOperands(HPhi* phi) {
+ if (!phi->CheckFlag(HInstruction::kUint32)) {
+ // This phi is not uint32 safe. No need to check operands.
+ return false;
+ }
+
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (!operand->CheckFlag(HInstruction::kUint32)) {
+ // Lazily mark constants that fit into uint32 range with the kUint32 flag.
+ if (operand->IsConstant() &&
+ HConstant::cast(operand)->IsUint32()) {
+ operand->SetFlag(HInstruction::kUint32);
+ continue;
+ }
+
+ // This phi is not safe, some operands are not uint32 values.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+// Remove the kUint32 flag from the phi itself and from its operands. If any
+// operand was a phi marked with kUint32, place it into a worklist for
+// transitive clearing of the kUint32 flag.
+void Uint32Analysis::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
+ phi->ClearFlag(HInstruction::kUint32);
+ for (int j = 0; j < phi->OperandCount(); j++) {
+ HValue* operand = phi->OperandAt(j);
+ if (operand->CheckFlag(HInstruction::kUint32)) {
+ operand->ClearFlag(HInstruction::kUint32);
+ if (operand->IsPhi()) {
+ worklist->Add(HPhi::cast(operand), zone_);
+ }
+ }
+ }
+}
+
+
+void Uint32Analysis::UnmarkUnsafePhis() {
+ // No phis were collected. Nothing to do.
+ if (phis_.length() == 0) return;
+
+ // Worklist used to transitively clear kUint32 from phis that
+ // are used as arguments to other phis.
+ ZoneList<HPhi*> worklist(phis_.length(), zone_);
+
+ // A phi can be used as a uint32 value if and only if
+ // all its operands are uint32 values and all its
+ // uses are uint32 safe.
+
+ // Iterate over the collected phis and unmark those that
+ // are unsafe. When unmarking a phi, unmark its operands
+ // and add each operand to the worklist if it is itself a phi.
+ // Phis that are still marked as safe are shifted down
+ // so that all safe phis form a prefix of the phis_ array.
+ int phi_count = 0;
+ for (int i = 0; i < phis_.length(); i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
+ phis_[phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+
+ // Now the phis_ array contains only those phis that have safe
+ // non-phi uses. Start transitively clearing the kUint32 flag
+ // from phi operands of discovered non-safe phis until
+ // only safe phis are left.
+ while (!worklist.is_empty()) {
+ while (!worklist.is_empty()) {
+ HPhi* phi = worklist.RemoveLast();
+ UnmarkPhi(phi, &worklist);
+ }
+
+ // Check if any operands of safe phis were unmarked,
+ // turning a safe phi into an unsafe one. The same value
+ // can flow into several phis.
+ int new_phi_count = 0;
+ for (int i = 0; i < phi_count; i++) {
+ HPhi* phi = phis_[i];
+
+ if (CheckPhiOperands(phi)) {
+ phis_[new_phi_count++] = phi;
+ } else {
+ UnmarkPhi(phi, &worklist);
+ }
+ }
+ phi_count = new_phi_count;
+ }
+}
+
+
+void HGraph::ComputeSafeUint32Operations() {
+ if (!FLAG_opt_safe_uint32_operations || uint32_instructions_ == NULL) {
+ return;
+ }
+
+ Uint32Analysis analysis(zone());
+ for (int i = 0; i < uint32_instructions_->length(); ++i) {
+ HInstruction* current = uint32_instructions_->at(i);
+ if (current->IsLinked() && current->representation().IsInteger32()) {
+ analysis.Analyze(current);
+ }
+ }
+
+ // Some phis might have been optimistically marked with the kUint32 flag.
+ // Remove this flag from those phis that are unsafe and propagate this
+ // information transitively, potentially clearing the kUint32 flag from
+ // some non-phi operations that are used as operands to unsafe phis.
+ analysis.UnmarkUnsafePhis();
+}
+
+
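
A minimal sketch, not part of the patch: the shape of the unmarking fixed point behind UnmarkUnsafePhis, reduced to a toy Node with a safe flag. Node, AllInputsSafe and Stabilize are illustrative names, not V8 API.

#include <cstddef>
#include <vector>

struct Node {
  bool safe;
  std::vector<Node*> inputs;
};

static bool AllInputsSafe(const Node* n) {
  for (size_t i = 0; i < n->inputs.size(); ++i) {
    if (!n->inputs[i]->safe) return false;
  }
  return true;
}

// Clearing one node's mark can invalidate nodes that consume it, so keep
// sweeping the candidates until a full pass changes nothing.
void Stabilize(std::vector<Node*>* candidates) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t i = 0; i < candidates->size(); ++i) {
      Node* n = (*candidates)[i];
      if (n->safe && !AllInputsSafe(n)) {
        n->safe = false;
        changed = true;
      }
    }
  }
}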
void HGraph::ComputeMinusZeroChecks() {
BitVector visited(GetMaximumValueID(), zone());
for (int i = 0; i < blocks_.length(); ++i) {
@@ -2360,12 +2935,12 @@ void HGraph::ComputeMinusZeroChecks() {
FunctionState::FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
- ReturnHandlingFlag return_handling)
+ InliningKind inlining_kind)
: owner_(owner),
compilation_info_(info),
oracle_(oracle),
call_context_(NULL),
- return_handling_(return_handling),
+ inlining_kind_(inlining_kind),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
@@ -2378,10 +2953,13 @@ FunctionState::FunctionState(HGraphBuilder* owner,
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
if_true->MarkAsInlineReturnTarget();
if_false->MarkAsInlineReturnTarget();
- Expression* cond = TestContext::cast(owner->ast_context())->condition();
+ TestContext* outer_test_context = TestContext::cast(owner->ast_context());
+ Expression* cond = outer_test_context->condition();
+ TypeFeedbackOracle* outer_oracle = outer_test_context->oracle();
// The AstContext constructor pushed on the context stack. This newed
// instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ = new TestContext(owner, cond, if_true, if_false);
+ test_context_ =
+ new TestContext(owner, cond, outer_oracle, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
function_return()->MarkAsInlineReturnTarget();
@@ -2457,14 +3035,15 @@ void TestContext::ReturnValue(HValue* value) {
}
-void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
-void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void EffectContext::ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
@@ -2476,7 +3055,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
}
-void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
@@ -2487,7 +3066,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
-void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
return owner()->Bailout("bad value context for arguments object value");
@@ -2507,7 +3086,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
}
-void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
HGraphBuilder* builder = owner();
builder->AddInstruction(instr);
@@ -2522,7 +3101,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
}
-void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
@@ -2546,8 +3125,8 @@ void TestContext::BuildBranch(HValue* value) {
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- unsigned test_id = condition()->test_id();
- ToBooleanStub::Types expected(builder->oracle()->ToBooleanTypes(test_id));
+ TypeFeedbackId test_id = condition()->test_id();
+ ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
@@ -2573,11 +3152,7 @@ void TestContext::BuildBranch(HValue* value) {
void HGraphBuilder::Bailout(const char* reason) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *name, reason);
- }
+ info()->set_bailout_reason(reason);
SetStackOverflow();
}
@@ -2605,17 +3180,14 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) {
void HGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, expr, true_block, false_block);
+ TestContext for_test(this, expr, oracle(), true_block, false_block);
Visit(expr);
}
-HValue* HGraphBuilder::VisitArgument(Expression* expr) {
- VisitForValue(expr);
- if (HasStackOverflow() || current_block() == NULL) return NULL;
- HValue* value = Pop();
- Push(AddInstruction(new(zone()) HPushArgument(value)));
- return value;
+void HGraphBuilder::VisitArgument(Expression* expr) {
+ CHECK_ALIVE(VisitForValue(expr));
+ Push(AddInstruction(new(zone()) HPushArgument(Pop())));
}
@@ -2670,7 +3242,7 @@ HGraph* HGraphBuilder::CreateGraph() {
HEnvironment* initial_env = environment()->CopyWithoutHistory();
HBasicBlock* body_entry = CreateBasicBlock(initial_env);
current_block()->Goto(body_entry);
- body_entry->SetJoinId(AstNode::kFunctionEntryId);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
// Handle implicit declaration of the function name in named function
@@ -2679,7 +3251,7 @@ HGraph* HGraphBuilder::CreateGraph() {
VisitVariableDeclaration(scope->function());
}
VisitDeclarations(scope->declarations());
- AddSimulate(AstNode::kDeclarationsId);
+ AddSimulate(BailoutId::Declarations());
HValue* context = environment()->LookupContext();
AddInstruction(
@@ -2693,50 +3265,78 @@ HGraph* HGraphBuilder::CreateGraph() {
current_block()->FinishExit(instr);
set_current_block(NULL);
}
+
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
+ Handle<TypeFeedbackInfo> type_info(
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
}
- graph()->OrderBlocks();
- graph()->AssignDominators();
+ return graph();
+}
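
The checksum handshake above can be summarized in isolation. A sketch under assumed names (the real accounting lives in TypeFeedbackInfo, not shown in this hunk): a composite checksum folds in the unoptimized code's own type-change count, and a match with the value stored at the previous optimized compile signals over-optimism rather than stale feedback:

    #include <cstdio>

    struct FeedbackInfo {
      int own_type_change_checksum;
      int inlined_type_change_checksum = -1;
      bool Matches(int composite) const {
        return inlined_type_change_checksum == composite;
      }
    };

    int main() {
      FeedbackInfo info{7};
      // First optimized compile: no stored checksum yet, LICM may be optimistic.
      int composite = info.own_type_change_checksum;
      printf("optimistic LICM: %d\n", !info.Matches(composite));  // 1
      info.inlined_type_change_checksum = composite;
      // Recompile without any new type changes: same checksum, so the previous
      // optimized code probably deoptimized from being too aggressive.
      composite = info.own_type_change_checksum;
      printf("optimistic LICM: %d\n", !info.Matches(composite));  // 0
    }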
+
+bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
+ *bailout_reason = SmartArrayPointer<char>();
+ OrderBlocks();
+ AssignDominators();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
- graph()->Verify(true);
+ Verify(true);
#endif
- graph()->PropagateDeoptimizingMark();
- if (!graph()->CheckConstPhiUses()) {
- Bailout("Unsupported phi use of const variable");
- return NULL;
+ PropagateDeoptimizingMark();
+ if (!CheckConstPhiUses()) {
+ *bailout_reason = SmartArrayPointer<char>(StrDup(
+ "Unsupported phi use of const variable"));
+ return false;
}
- graph()->EliminateRedundantPhis();
- if (!graph()->CheckArgumentsPhiUses()) {
- Bailout("Unsupported phi use of arguments");
- return NULL;
+ EliminateRedundantPhis();
+ if (!CheckArgumentsPhiUses()) {
+ *bailout_reason = SmartArrayPointer<char>(StrDup(
+ "Unsupported phi use of arguments"));
+ return false;
}
- if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- graph()->CollectPhis();
+ if (FLAG_eliminate_dead_phis) EliminateUnreachablePhis();
+ CollectPhis();
- if (graph()->has_osr_loop_entry()) {
- const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
+ if (has_osr_loop_entry()) {
+ const ZoneList<HPhi*>* phis = osr_loop_entry()->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
- graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
+ osr_values()->at(phi->merged_index())->set_incoming_value(phi);
}
}
- HInferRepresentation rep(graph());
+ HInferRepresentation rep(this);
rep.Analyze();
- graph()->MarkDeoptimizeOnUndefined();
- graph()->InsertRepresentationChanges();
+ MarkDeoptimizeOnUndefined();
+ InsertRepresentationChanges();
+
+ InitializeInferredTypes();
- graph()->InitializeInferredTypes();
- graph()->Canonicalize();
+ // This must be performed before canonicalization to ensure that Canonicalize
+ // will not remove semantically meaningful ToInt32 operations, e.g. BIT_OR
+ // with zero.
+ ComputeSafeUint32Operations();
+
+ Canonicalize();
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", graph());
- HGlobalValueNumberer gvn(graph(), info());
+ HPhase phase("H_Global value numbering", this);
+ HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
// Trigger a second analysis pass to further eliminate duplicate values that
// could only be discovered by removing side-effect-generating instructions
@@ -2748,19 +3348,20 @@ HGraph* HGraphBuilder::CreateGraph() {
}
if (FLAG_use_range) {
- HRangeAnalysis rangeAnalysis(graph());
+ HRangeAnalysis rangeAnalysis(this);
rangeAnalysis.Analyze();
}
- graph()->ComputeMinusZeroChecks();
+ ComputeMinusZeroChecks();
// Eliminate redundant stack checks on backwards branches.
- HStackCheckEliminator sce(graph());
+ HStackCheckEliminator sce(this);
sce.Process();
- graph()->EliminateRedundantBoundsChecks();
- graph()->DehoistSimpleArrayIndexComputations();
+ EliminateRedundantBoundsChecks();
+ DehoistSimpleArrayIndexComputations();
+ if (FLAG_dead_code_elimination) DeadCodeElimination();
- return graph();
+ return true;
}
@@ -2785,6 +3386,8 @@ class BoundsCheckKey : public ZoneObject {
static BoundsCheckKey* Create(Zone* zone,
HBoundsCheck* check,
int32_t* offset) {
+ if (!check->index()->representation().IsInteger32()) return NULL;
+
HValue* index_base = NULL;
HConstant* constant = NULL;
bool is_sub = false;
@@ -2851,7 +3454,8 @@ class BoundsCheckBbData: public ZoneObject {
int32_t LowerOffset() const { return lower_offset_; }
int32_t UpperOffset() const { return upper_offset_; }
HBasicBlock* BasicBlock() const { return basic_block_; }
- HBoundsCheck* Check() const { return check_; }
+ HBoundsCheck* LowerCheck() const { return lower_check_; }
+ HBoundsCheck* UpperCheck() const { return upper_check_; }
BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
@@ -2859,76 +3463,85 @@ class BoundsCheckBbData: public ZoneObject {
return offset >= LowerOffset() && offset <= UpperOffset();
}
- // This method removes new_check and modifies the current check so that it
- // also "covers" what new_check covered.
- // The obvious precondition is that new_check follows Check() in the
- // same basic block, and that new_offset is not covered (otherwise we
- // could simply remove new_check).
- // As a consequence LowerOffset() or UpperOffset() change (the covered
+ bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+ // The goal of this method is to modify either upper_offset_ or
+ // lower_offset_ so that new_offset is also covered (the covered
+ // range grows).
//
- // In the general case the check covering the current range should be like
- // these two checks:
- // 0 <= Key()->IndexBase() + LowerOffset()
- // Key()->IndexBase() + UpperOffset() < Key()->Length()
- //
- // We can transform the second check like this:
- // Key()->IndexBase() + LowerOffset() <
- // Key()->Length() + (LowerOffset() - UpperOffset())
- // so we can handle both checks with a single unsigned comparison.
+ // The precondition is that new_check follows UpperCheck() and
+ // LowerCheck() in the same basic block, and that new_offset is not
+ // covered (otherwise we could simply remove new_check).
//
- // The bulk of this method changes Check()->index() and Check()->length()
- // replacing them with new HAdd instructions to perform the transformation
- // described above.
+ // If HasSingleCheck() is true then new_check is added as "second check"
+ // (either upper or lower; note that HasSingleCheck() becomes false).
+ // Otherwise one of the current checks is modified so that it also covers
+ // new_offset, and new_check is removed.
void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsInteger32());
+ bool keep_new_check = false;
if (new_offset > upper_offset_) {
upper_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ upper_check_ = new_check;
+ } else {
+ BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ upper_check_->SetOperandAt(0, added_upper_index_);
+ }
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
+ if (HasSingleCheck()) {
+ keep_new_check = true;
+ lower_check_ = new_check;
+ } else {
+ BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ lower_check_->SetOperandAt(0, added_lower_index_);
+ }
} else {
ASSERT(false);
}
- BuildOffsetAdd(&added_index_,
- &added_index_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- lower_offset_);
- Check()->SetOperandAt(0, added_index_);
- BuildOffsetAdd(&added_length_,
- &added_length_offset_,
- Key()->Length(),
- new_check->length()->representation(),
- lower_offset_ - upper_offset_);
- Check()->SetOperandAt(1, added_length_);
-
- new_check->DeleteAndReplaceWith(NULL);
+ if (!keep_new_check) {
+ new_check->DeleteAndReplaceWith(NULL);
+ }
}
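
Stripped of the IR plumbing, CoverCheck maintains one covered offset interval per index base. A minimal sketch, assuming the checks themselves are represented only by which end of the interval they guard (illustrative, not V8 code):

    #include <cstdio>

    // One group of bounds checks on the same index base: the interval
    // [lower, upper] of offsets already proven in range.
    struct Group {
      int lower = 0;
      int upper = 0;
      bool single_check = true;  // lower and upper still guarded by one check

      // Returns true when the incoming check must be kept (it becomes the new
      // lower or upper check); otherwise an existing check is widened instead.
      bool Cover(int new_offset) {
        if (new_offset > upper) {
          upper = new_offset;
          if (single_check) { single_check = false; return true; }
          printf("widen upper check to offset %d\n", new_offset);
        } else if (new_offset < lower) {
          lower = new_offset;
          if (single_check) { single_check = false; return true; }
          printf("widen lower check to offset %d\n", new_offset);
        }
        return false;
      }
    };

    int main() {
      Group g;                             // first check covered offset 0 only
      printf("keep: %d\n", g.Cover(4));    // kept as the upper check
      printf("keep: %d\n", g.Cover(-2));   // widens the existing lower check
      printf("keep: %d\n", g.Cover(3));    // inside [-2, 4]: nothing to widen,
                                           // the caller just removes the check
    }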
void RemoveZeroOperations() {
- RemoveZeroAdd(&added_index_, &added_index_offset_);
- RemoveZeroAdd(&added_length_, &added_length_offset_);
+ RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
+ RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
}
BoundsCheckBbData(BoundsCheckKey* key,
int32_t lower_offset,
int32_t upper_offset,
HBasicBlock* bb,
- HBoundsCheck* check,
+ HBoundsCheck* lower_check,
+ HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
: key_(key),
lower_offset_(lower_offset),
upper_offset_(upper_offset),
basic_block_(bb),
- check_(check),
- added_index_offset_(NULL),
- added_index_(NULL),
- added_length_offset_(NULL),
- added_length_(NULL),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ added_lower_index_(NULL),
+ added_lower_offset_(NULL),
+ added_upper_index_(NULL),
+ added_upper_offset_(NULL),
next_in_bb_(next_in_bb),
father_in_dt_(father_in_dt) { }
@@ -2937,29 +3550,33 @@ class BoundsCheckBbData: public ZoneObject {
int32_t lower_offset_;
int32_t upper_offset_;
HBasicBlock* basic_block_;
- HBoundsCheck* check_;
- HConstant* added_index_offset_;
- HAdd* added_index_;
- HConstant* added_length_offset_;
- HAdd* added_length_;
+ HBoundsCheck* lower_check_;
+ HBoundsCheck* upper_check_;
+ HAdd* added_lower_index_;
+ HConstant* added_lower_offset_;
+ HAdd* added_upper_index_;
+ HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- void BuildOffsetAdd(HAdd** add,
+ void BuildOffsetAdd(HBoundsCheck* check,
+ HAdd** add,
HConstant** constant,
HValue* original_value,
Representation representation,
int32_t new_offset) {
HConstant* new_constant = new(BasicBlock()->zone())
- HConstant(Handle<Object>(Smi::FromInt(new_offset)),
- Representation::Integer32());
+ HConstant(new_offset, Representation::Integer32());
if (*add == NULL) {
- new_constant->InsertBefore(Check());
- *add = new(BasicBlock()->zone()) HAdd(NULL,
+ new_constant->InsertBefore(check);
+ // Because of the bounds check elimination algorithm, the index is always
+ // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation.
+ HValue* context = HBinaryOperation::cast(check->index())->context();
+ *add = new(BasicBlock()->zone()) HAdd(context,
original_value,
new_constant);
(*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(Check());
+ (*add)->InsertBefore(check);
} else {
new_constant->InsertBefore(*add);
(*constant)->DeleteAndReplaceWith(new_constant);
@@ -2985,20 +3602,22 @@ static bool BoundsCheckKeyMatch(void* key1, void* key2) {
class BoundsCheckTable : private ZoneHashMap {
public:
- BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key) {
+ BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone) {
return reinterpret_cast<BoundsCheckBbData**>(
- &(Lookup(key, key->Hash(), true)->value));
+ &(Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value));
}
- void Insert(BoundsCheckKey* key, BoundsCheckBbData* data) {
- Lookup(key, key->Hash(), true)->value = data;
+ void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone) {
+ Lookup(key, key->Hash(), true, ZoneAllocationPolicy(zone))->value = data;
}
void Delete(BoundsCheckKey* key) {
Remove(key, key->Hash());
}
- BoundsCheckTable() : ZoneHashMap(BoundsCheckKeyMatch) { }
+ explicit BoundsCheckTable(Zone* zone)
+ : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)) { }
};
@@ -3021,8 +3640,9 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
int32_t offset;
BoundsCheckKey* key =
- BoundsCheckKey::Create(bb->zone(), check, &offset);
- BoundsCheckBbData** data_p = table->LookupOrInsert(key);
+ BoundsCheckKey::Create(zone(), check, &offset);
+ if (key == NULL) continue;
+ BoundsCheckBbData** data_p = table->LookupOrInsert(key, zone());
BoundsCheckBbData* data = *data_p;
if (data == NULL) {
bb_data_list = new(zone()) BoundsCheckBbData(key,
@@ -3030,6 +3650,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
offset,
bb,
check,
+ check,
bb_data_list,
NULL);
*data_p = bb_data_list;
@@ -3044,14 +3665,15 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
int32_t new_upper_offset = offset > data->UpperOffset()
? offset
: data->UpperOffset();
- bb_data_list = new(bb->zone()) BoundsCheckBbData(key,
- new_lower_offset,
- new_upper_offset,
- bb,
- check,
- bb_data_list,
- data);
- table->Insert(key, bb_data_list);
+ bb_data_list = new(zone()) BoundsCheckBbData(key,
+ new_lower_offset,
+ new_upper_offset,
+ bb,
+ data->LowerCheck(),
+ data->UpperCheck(),
+ bb_data_list,
+ data);
+ table->Insert(key, bb_data_list, zone());
}
}
@@ -3064,7 +3686,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
data = data->NextInBasicBlock()) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
- table->Insert(data->Key(), data->FatherInDominatorTree());
+ table->Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table->Delete(data->Key());
}
@@ -3074,14 +3696,14 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
void HGraph::EliminateRedundantBoundsChecks() {
HPhase phase("H_Eliminate bounds checks", this);
- AssertNoAllocation no_gc;
- BoundsCheckTable checks_table;
+ BoundsCheckTable checks_table(zone());
EliminateRedundantBoundsChecks(entry_block(), &checks_table);
}
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey();
+ if (!index->representation().IsInteger32()) return;
HConstant* constant;
HValue* subexpression;
@@ -3136,27 +3758,11 @@ void HGraph::DehoistSimpleArrayIndexComputations() {
instr != NULL;
instr = instr->next()) {
ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyedFastElement()) {
- HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedFastDoubleElement()) {
- HLoadKeyedFastDoubleElement* op =
- HLoadKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
- HLoadKeyedSpecializedArrayElement* op =
- HLoadKeyedSpecializedArrayElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastElement()) {
- HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
+ if (instr->IsLoadKeyed()) {
+ HLoadKeyed* op = HLoadKeyed::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedFastDoubleElement()) {
- HStoreKeyedFastDoubleElement* op =
- HStoreKeyedFastDoubleElement::cast(instr);
- array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
- HStoreKeyedSpecializedArrayElement* op =
- HStoreKeyedSpecializedArrayElement::cast(instr);
+ } else if (instr->IsStoreKeyed()) {
+ HStoreKeyed* op = HStoreKeyed::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else {
continue;
@@ -3167,6 +3773,36 @@ void HGraph::DehoistSimpleArrayIndexComputations() {
}
+void HGraph::DeadCodeElimination() {
+ HPhase phase("H_Dead code elimination", this);
+ ZoneList<HInstruction*> worklist(blocks_.length(), zone());
+ for (int i = 0; i < blocks()->length(); ++i) {
+ for (HInstruction* instr = blocks()->at(i)->first();
+ instr != NULL;
+ instr = instr->next()) {
+ if (instr->IsDead()) worklist.Add(instr, zone());
+ }
+ }
+
+ while (!worklist.is_empty()) {
+ HInstruction* instr = worklist.RemoveLast();
+ if (FLAG_trace_dead_code_elimination) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ instr->PrintNameTo(&stream);
+ stream.Add(" = ");
+ instr->PrintTo(&stream);
+ PrintF("[removing dead instruction %s]\n", *stream.ToCString());
+ }
+ instr->DeleteAndReplaceWith(NULL);
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ HValue* operand = instr->OperandAt(i);
+ if (operand->IsDead()) worklist.Add(HInstruction::cast(operand), zone());
+ }
+ }
+}
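
The pass above is a classic worklist-driven dead code elimination. A self-contained sketch of the same shape, with use counts standing in for V8's use lists (all names illustrative):

    #include <cstdio>
    #include <vector>

    struct Instr {
      const char* name;
      int uses = 0;
      bool side_effects = false;
      bool deleted = false;
      std::vector<Instr*> operands;
      // Dead: nothing uses it and it cannot be observed.
      bool IsDead() const { return !deleted && uses == 0 && !side_effects; }
    };

    int main() {
      Instr a{"a"}, b{"b"}, c{"c"};
      b.operands = {&a};
      a.uses = 1;              // a is used only by b
      c.side_effects = true;   // c must stay

      std::vector<Instr*> worklist;
      for (Instr* i : {&a, &b, &c}) {
        if (i->IsDead()) worklist.push_back(i);
      }
      while (!worklist.empty()) {
        Instr* dead = worklist.back();
        worklist.pop_back();
        dead->deleted = true;
        printf("removing dead instruction %s\n", dead->name);
        // Deleting an instruction drops its operand uses, which can make
        // the operands dead in turn (DeleteAndReplaceWith(NULL) analogue).
        for (Instr* op : dead->operands) {
          op->uses--;
          if (op->IsDead()) worklist.push_back(op);
        }
      }
    }

Removing b drops the last use of a, so a is removed on the next iteration while the side-effecting c survives.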
+
+
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
@@ -3174,7 +3810,7 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddSimulate(int ast_id) {
+void HGraphBuilder::AddSimulate(BailoutId ast_id) {
ASSERT(current_block() != NULL);
current_block()->AddSimulate(ast_id);
}
@@ -3195,9 +3831,9 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) {
template <class Instruction>
HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
- ZoneList<HValue*> arguments(count);
+ ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
- arguments.Add(Pop());
+ arguments.Add(Pop(), zone());
}
while (!arguments.is_empty()) {
@@ -3418,28 +4054,29 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ FunctionState* state = function_state();
AstContext* context = call_context();
if (context == NULL) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
current_block()->FinishExit(new(zone()) HReturn(result));
- } else if (function_state()->is_construct()) {
- // Return from an inlined construct call. In a test context the return
- // value will always evaluate to true, in a value context the return value
- // needs to be a JSObject.
+ } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+ // Return from an inlined construct call. In a test context the return value
+ // will always evaluate to true; in a value context the return value needs
+ // to be a JSObject.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), function_state());
+ current_block()->Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state());
+ current_block()->Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
- HValue* receiver = environment()->Lookup(0);
+ HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
new(zone()) HHasInstanceTypeAndBranch(return_value,
FIRST_SPEC_OBJECT_TYPE,
@@ -3449,31 +4086,36 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
current_block()->Finish(typecheck);
- if_spec_object->AddLeaveInlined(return_value,
- function_return(),
- function_state());
- not_spec_object->AddLeaveInlined(receiver,
- function_return(),
- function_state());
+ if_spec_object->AddLeaveInlined(return_value, state);
+ not_spec_object->AddLeaveInlined(receiver, state);
+ }
+ } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+ // Return from an inlined setter call. The returned value is never used;
+ // the value of an assignment is always the value of its RHS.
+ CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ if (context->IsTest()) {
+ HValue* rhs = environment()->arguments_environment()->Lookup(1);
+ context->ReturnValue(rhs);
+ } else if (context->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(context->IsValue());
+ HValue* rhs = environment()->arguments_environment()->Lookup(1);
+ current_block()->AddLeaveInlined(rhs, state);
}
} else {
- // Return from an inlined function, visit the subexpression in the
+ // Return from a normal inlined function. Visit the subexpression in the
// expression context of the call.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
- VisitForControl(stmt->expression(),
- test->if_true(),
- test->if_false());
+ VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), function_state());
+ current_block()->Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
- HValue* return_value = Pop();
- current_block()->AddLeaveInlined(return_value,
- function_return(),
- function_state());
+ current_block()->AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
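
The three return flavors above encode JavaScript semantics that are easy to state in isolation. A standalone model (illustrative names only, not this patch's types):

    #include <cstdio>

    enum InliningKind { NORMAL_RETURN, CONSTRUCT_CALL_RETURN, SETTER_CALL_RETURN };

    // What an inlined "return expr" evaluates to in a value context,
    // per the branches above.
    static const char* ReturnedValue(InliningKind kind, bool is_spec_object) {
      switch (kind) {
        case CONSTRUCT_CALL_RETURN:
          // A constructor's return value only wins if it is a real object;
          // otherwise the freshly allocated receiver is used.
          return is_spec_object ? "returned value" : "receiver";
        case SETTER_CALL_RETURN:
          // An assignment always evaluates to its right-hand side.
          return "assigned RHS";
        default:
          return "returned value";
      }
    }

    int main() {
      printf("%s\n", ReturnedValue(CONSTRUCT_CALL_RETURN, false));  // receiver
      printf("%s\n", ReturnedValue(SETTER_CALL_RETURN, true));      // assigned RHS
    }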
@@ -3548,7 +4190,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// 2. Build all the tests, with dangling true branches
- int default_id = AstNode::kNoNumber;
+ BailoutId default_id = BailoutId::None();
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
if (clause->is_default()) {
@@ -3601,9 +4243,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
- int join_id = (default_id != AstNode::kNoNumber)
- ? default_id
- : stmt->ExitId();
+ BailoutId join_id = !default_id.IsNone() ? default_id : stmt->ExitId();
last_block = CreateJoin(last_block, not_string_block, join_id);
}
@@ -3693,17 +4333,17 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
non_osr_entry->Goto(loop_predecessor);
set_current_block(osr_entry);
- int osr_entry_id = statement->OsrEntryId();
+ BailoutId osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
ZoneList<HUnknownOSRValue*>* osr_values =
- new(zone()) ZoneList<HUnknownOSRValue*>(length);
+ new(zone()) ZoneList<HUnknownOSRValue*>(length, zone());
for (int i = 0; i < first_expression_index; ++i) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Bind(i, osr_value);
- osr_values->Add(osr_value);
+ osr_values->Add(osr_value, zone());
}
if (first_expression_index != length) {
@@ -3712,7 +4352,7 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Push(osr_value);
- osr_values->Add(osr_value);
+ osr_values->Add(osr_value, zone());
}
}
@@ -3917,15 +4557,14 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
map,
DescriptorArray::kEnumCacheBridgeCacheIndex));
- HInstruction* array_length = AddInstruction(
- new(zone()) HFixedArrayBaseLength(array));
+ HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
HInstruction* start_index = AddInstruction(new(zone()) HConstant(
Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
Push(map);
Push(array);
- Push(array_length);
+ Push(enum_length);
Push(start_index);
HInstruction* index_cache = AddInstruction(
@@ -3963,10 +4602,11 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(loop_body);
HValue* key = AddInstruction(
- new(zone()) HLoadKeyedFastElement(
+ new(zone()) HLoadKeyed(
environment()->ExpressionStackAt(2), // Enum cache.
environment()->ExpressionStackAt(0), // Iteration index.
- HLoadKeyedFastElement::OMIT_HOLE_CHECK));
+ environment()->ExpressionStackAt(0),
+ FAST_ELEMENTS));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -4121,8 +4761,7 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
}
Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
- if (!lookup->IsFound() ||
- lookup->type() != NORMAL ||
+ if (!lookup->IsNormal() ||
(is_store && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
@@ -4152,8 +4791,9 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Variable* variable = expr->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
- return Bailout("reference to global harmony declared variable");
+ if (IsLexicalVariableMode(variable->mode())) {
+ // TODO(rossberg): should this be an ASSERT?
+ return Bailout("reference to global lexical variable");
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
@@ -4199,9 +4839,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
if (value == graph()->GetConstantHole()) {
- ASSERT(variable->mode() == CONST ||
- variable->mode() == CONST_HARMONY ||
- variable->mode() == LET);
+ ASSERT(IsDeclaredVariableMode(variable->mode()) &&
+ variable->mode() != VAR);
return Bailout("reference to uninitialized variable");
}
return ast_context()->ReturnValue(value);
@@ -4233,9 +4872,12 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ Handle<JSFunction> closure = function_state()->compilation_info()->closure();
+ Handle<FixedArray> literals(closure->literals());
HValue* context = environment()->LookupContext();
HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
+ literals,
expr->pattern(),
expr->flags(),
expr->literal_index());
@@ -4243,6 +4885,86 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+static void LookupInPrototypes(Handle<Map> map,
+ Handle<String> name,
+ LookupResult* lookup) {
+ while (map->prototype()->IsJSObject()) {
+ Handle<JSObject> holder(JSObject::cast(map->prototype()));
+ if (!holder->HasFastProperties()) break;
+ map = Handle<Map>(holder->map());
+ map->LookupDescriptor(*holder, *name, lookup);
+ if (lookup->IsFound()) return;
+ }
+ lookup->NotFound();
+}
+
+
+// Tries to find a JavaScript accessor of the given name in the prototype chain
+// starting at the given map. Returns true iff there is one, and if so also
+// yields the corresponding AccessorPair plus its holder (which can be null
+// when the accessor is found directly in the given map).
+static bool LookupAccessorPair(Handle<Map> map,
+ Handle<String> name,
+ Handle<AccessorPair>* accessors,
+ Handle<JSObject>* holder) {
+ LookupResult lookup(map->GetIsolate());
+
+ // Check for a JavaScript accessor directly in the map.
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetValueFromMap(*map));
+ if (!callback->IsAccessorPair()) return false;
+ *accessors = Handle<AccessorPair>::cast(callback);
+ *holder = Handle<JSObject>();
+ return true;
+ }
+
+ // Everything else, e.g. a field, can't be an accessor call.
+ if (lookup.IsFound()) return false;
+
+ // Check for a JavaScript accessor somewhere in the proto chain.
+ LookupInPrototypes(map, name, &lookup);
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetValue());
+ if (!callback->IsAccessorPair()) return false;
+ *accessors = Handle<AccessorPair>::cast(callback);
+ *holder = Handle<JSObject>(lookup.holder());
+ return true;
+ }
+
+ // We haven't found a JavaScript accessor anywhere.
+ return false;
+}
+
+
+static bool LookupGetter(Handle<Map> map,
+ Handle<String> name,
+ Handle<JSFunction>* getter,
+ Handle<JSObject>* holder) {
+ Handle<AccessorPair> accessors;
+ if (LookupAccessorPair(map, name, &accessors, holder) &&
+ accessors->getter()->IsJSFunction()) {
+ *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
+ return true;
+ }
+ return false;
+}
+
+
+static bool LookupSetter(Handle<Map> map,
+ Handle<String> name,
+ Handle<JSFunction>* setter,
+ Handle<JSObject>* holder) {
+ Handle<AccessorPair> accessors;
+ if (LookupAccessorPair(map, name, &accessors, holder) &&
+ accessors->setter()->IsJSFunction()) {
+ *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+ return true;
+ }
+ return false;
+}
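
The three helpers above implement one lookup discipline: consult the receiver's own map first, then walk the prototype chain, and let the first map that knows the name decide. A standalone model with maps reduced to name tables (illustrative only):

    #include <cstdio>
    #include <map>
    #include <string>

    struct Obj {
      std::map<std::string, std::string> props;  // name -> "field" or "accessor"
      Obj* proto = nullptr;
    };

    static bool LookupAccessor(Obj* o, const std::string& name, Obj** holder) {
      auto it = o->props.find(name);
      if (it != o->props.end()) {      // found directly: holder stays null
        *holder = nullptr;
        return it->second == "accessor";
      }
      for (Obj* p = o->proto; p != nullptr; p = p->proto) {
        it = p->props.find(name);
        if (it != p->props.end()) {    // the first hit in the chain decides
          *holder = p;
          return it->second == "accessor";
        }
      }
      return false;                    // not found anywhere
    }

    int main() {
      Obj proto, obj;
      obj.proto = &proto;
      proto.props["x"] = "accessor";
      obj.props["y"] = "field";
      Obj* holder = nullptr;
      printf("x: %d\n", LookupAccessor(&obj, "x", &holder));  // 1, holder = proto
      printf("y: %d\n", LookupAccessor(&obj, "y", &holder));  // 0, a field is
                                                              // not an accessor
    }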
+
+
// Determines whether the given array or object literal boilerplate satisfies
// all limits to be considered for fast deep-copying and computes the total
// size of all objects that are part of the graph.
@@ -4258,7 +4980,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastElements()) {
+ } else if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -4341,7 +5063,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
@@ -4360,7 +5082,23 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
- HInstruction* store = BuildStoreNamed(literal, value, property);
+ Handle<Map> map = property->GetReceiverType();
+ Handle<String> name = property->key()->AsPropertyName();
+ HInstruction* store;
+ if (map.is_null()) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ } else {
+#if DEBUG
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ ASSERT(!LookupSetter(map, name, &setter, &holder));
+#endif
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(literal,
+ name,
+ value,
+ map));
+ }
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
@@ -4457,7 +5195,9 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
- elements = new(zone()) HLoadElements(literal);
+ // Pass in the literal as a dummy dependency, since the receiver always
+ // has elements.
+ elements = new(zone()) HLoadElements(literal, literal);
AddInstruction(elements);
HValue* key = AddInstruction(
@@ -4465,22 +5205,21 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Representation::Integer32()));
switch (boilerplate_elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(value));
// Fall through.
case FAST_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyedFastElement(
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyed(
elements,
key,
value,
boilerplate_elements_kind));
break;
- case FAST_DOUBLE_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
- key,
- value));
- break;
default:
UNREACHABLE();
break;
@@ -4497,10 +5236,17 @@ static bool ComputeLoadStoreField(Handle<Map> type,
Handle<String> name,
LookupResult* lookup,
bool is_store) {
- type->LookupInDescriptors(NULL, *name, lookup);
- if (!lookup->IsFound()) return false;
- if (lookup->type() == FIELD) return true;
- return is_store && (lookup->type() == MAP_TRANSITION) &&
+ // If we directly find a field, the access can be inlined.
+ type->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsField()) return true;
+
+ // For a load, we are out of luck if there is no such field.
+ if (!is_store) return false;
+
+ // 2nd chance: A store into a non-existent field can still be inlined if we
+ // have a matching transition and some room left in the object.
+ type->LookupTransition(NULL, *name, lookup);
+ return lookup->IsTransitionToField(*type) &&
(type->unused_property_fields() > 0);
}
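
The rule encoded above fits in a few lines when separated from LookupResult. A sketch with a toy map (assumed fields, not V8 types):

    #include <cstdio>

    struct ToyMap {
      bool has_field;                // the map already describes the property
      bool has_transition_to_field;  // a map transition would add it
      int unused_property_fields;    // spare in-object slots
    };

    // A named access is inlinable when the map has the field, or -- for
    // stores only -- when there is a matching field transition and room
    // left in the object.
    static bool CanInline(const ToyMap& m, bool is_store) {
      if (m.has_field) return true;
      if (!is_store) return false;
      return m.has_transition_to_field && m.unused_property_fields > 0;
    }

    int main() {
      ToyMap fresh{false, true, 2};
      printf("load: %d, store: %d\n",
             CanInline(fresh, false),   // 0: no such field to read
             CanInline(fresh, true));   // 1: store can take the transition
    }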
@@ -4508,8 +5254,8 @@ static bool ComputeLoadStoreField(Handle<Map> type,
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
Handle<String> name,
LookupResult* lookup) {
- ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
- if (lookup->type() == FIELD) {
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
+ if (lookup->IsField()) {
return lookup->GetLocalFieldIndexFromMap(*type);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
@@ -4518,31 +5264,60 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
}
+void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+ Handle<Map> map) {
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
+}
+
+
HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check) {
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type));
+ Handle<Map> map,
+ LookupResult* lookup) {
+ ASSERT(lookup->IsFound());
+ // If the property does not exist yet, we have to check that it wasn't made
+ // readonly or turned into a setter by intervening modifications of the
+ // prototype chain.
+ if (!lookup->IsProperty() && map->prototype()->IsJSReceiver()) {
+ Object* proto = map->prototype();
+ // First check that the prototype chain isn't affected already.
+ LookupResult proto_result(isolate());
+ proto->Lookup(*name, &proto_result);
+ if (proto_result.IsProperty()) {
+ // If the inherited property could induce readonly-ness, bail out.
+ if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
+ Bailout("improper object on prototype chain for store");
+ return NULL;
+ }
+ // We only need to check up to the preexisting property.
+ proto = proto_result.holder();
+ } else {
+ // Otherwise, find the top prototype.
+ while (proto->GetPrototype()->IsJSObject()) proto = proto->GetPrototype();
+ ASSERT(proto->GetPrototype()->IsNull());
+ }
+ ASSERT(proto->IsJSObject());
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ Handle<JSObject>(JSObject::cast(map->prototype())),
+ Handle<JSObject>(JSObject::cast(proto))));
}
- int index = ComputeLoadStoreFieldIndex(type, name, lookup);
+ int index = ComputeLoadStoreFieldIndex(map, name, lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
- offset += type->instance_size();
+ offset += map->instance_size();
} else {
offset += FixedArray::kHeaderSize;
}
HStoreNamedField* instr =
new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
- if (lookup->type() == MAP_TRANSITION) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
+ if (lookup->IsTransitionToField(*map)) {
+ Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
@@ -4565,44 +5340,31 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
}
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
HValue* value,
- ObjectLiteral::Property* prop) {
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
- LookupResult lookup(isolate());
- Handle<Map> type = prop->GetReceiverType();
- bool is_monomorphic = prop->IsMonomorphic() &&
- ComputeLoadStoreField(type, name, &lookup, true);
-
- return is_monomorphic
- ? BuildStoreNamedField(object, name, value, type, &lookup,
- true) // Needs smi and map check.
- : BuildStoreNamedGeneric(object, name, value);
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder) {
+ AddCheckConstantFunction(holder, object, map);
+ AddInstruction(new(zone()) HPushArgument(object));
+ AddInstruction(new(zone()) HPushArgument(value));
+ return new(zone()) HCallConstantFunction(setter, 2);
}
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr) {
- Property* prop = (expr->AsProperty() != NULL)
- ? expr->AsProperty()
- : expr->AsAssignment()->target()->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->handle());
- ASSERT(!name.is_null());
-
+HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
+ // Handle a store to a known field.
LookupResult lookup(isolate());
- SmallMapList* types = expr->GetReceiverTypes();
- bool is_monomorphic = expr->IsMonomorphic() &&
- ComputeLoadStoreField(types->first(), name, &lookup, true);
+ if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ AddCheckMapsWithTransitions(object, map);
+ return BuildStoreNamedField(object, name, value, map, &lookup);
+ }
- return is_monomorphic
- ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
- true) // Needs smi and map check.
- : BuildStoreNamedGeneric(object, name, value);
+ // No luck, do a generic store.
+ return BuildStoreNamedGeneric(object, name, value);
}
@@ -4642,16 +5404,18 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
// Use monomorphic load if property lookup results in the same field index
// for all maps. Requires special map check on the set of all handled maps.
+ AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
- AddInstruction(new(zone()) HCheckMaps(object, types));
- instr = BuildLoadNamedField(object, expr, map, &lookup, false);
+ AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
+ instr = BuildLoadNamedField(object, map, &lookup);
} else {
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(context,
object,
types,
- name);
+ name,
+ zone());
}
instr->set_position(expr->position());
@@ -4685,8 +5449,9 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
current_block()->Finish(compare);
set_current_block(if_true);
- HInstruction* instr =
- BuildStoreNamedField(object, name, value, map, &lookup, false);
+ HInstruction* instr;
+ CHECK_ALIVE(instr =
+ BuildStoreNamedField(object, name, value, map, &lookup));
instr->set_position(expr->position());
// Goto will add the HSimulate for the store.
AddInstruction(instr);
@@ -4737,41 +5502,66 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* value = NULL;
- HInstruction* instr = NULL;
-
if (prop->key()->IsPropertyName()) {
// Named store.
CHECK_ALIVE(VisitForValue(expr->value()));
- value = Pop();
- HValue* object = Pop();
+ HValue* value = environment()->ExpressionStackAt(0);
+ HValue* object = environment()->ExpressionStackAt(1);
Literal* key = prop->key()->AsLiteral();
Handle<String> name = Handle<String>::cast(key->handle());
ASSERT(!name.is_null());
+ HInstruction* instr = NULL;
SmallMapList* types = expr->GetReceiverTypes();
- LookupResult lookup(isolate());
-
- if (expr->IsMonomorphic()) {
- instr = BuildStoreNamed(object, value, expr);
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> map;
+ if (monomorphic) {
+ map = types->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map);
+ if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
+ return;
+ }
+ Drop(2);
+ AddInstruction(new(zone()) HPushArgument(object));
+ AddInstruction(new(zone()) HPushArgument(value));
+ instr = new(zone()) HCallConstantFunction(setter, 2);
+ } else {
+ Drop(2);
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
} else if (types != NULL && types->length() > 1) {
- HandlePolymorphicStoreNamedField(expr, object, value, types, name);
- return;
-
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(expr, object, value, types, name);
} else {
+ Drop(2);
instr = BuildStoreNamedGeneric(object, name, value);
}
+ Push(value);
+ instr->set_position(expr->position());
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ return ast_context()->ReturnValue(Pop());
+
} else {
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
CHECK_ALIVE(VisitForValue(expr->value()));
- value = Pop();
+ HValue* value = Pop();
HValue* key = Pop();
HValue* object = Pop();
bool has_side_effects = false;
@@ -4784,11 +5574,6 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
- Push(value);
- instr->set_position(expr->position());
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
}
@@ -4798,7 +5583,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
- int ast_id) {
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
@@ -4911,23 +5696,36 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- prop->RecordTypeFeedback(oracle());
+ prop->RecordTypeFeedback(oracle(), zone());
if (prop->key()->IsPropertyName()) {
// Named property.
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
+ HValue* object = Top();
+
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ Handle<Map> map;
+ HInstruction* load;
+ bool monomorphic = prop->IsMonomorphic();
+ if (monomorphic) {
+ map = prop->GetReceiverTypes()->first();
+ // We can't generate code for a monomorphic dictionary-mode load,
+ // so just pretend it is not monomorphic.
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ load = BuildCallGetter(object, map, getter, holder);
+ } else {
+ load = BuildLoadNamedMonomorphic(object, name, prop, map);
+ }
} else {
- load = BuildLoadNamedGeneric(obj, prop);
+ load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -4937,7 +5735,22 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
- HInstruction* store = BuildStoreNamed(obj, instr, prop);
+ HInstruction* store;
+ if (!monomorphic) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
+ } else {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ store = BuildCallSetter(object, instr, map, setter, holder);
+ } else {
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
+ name,
+ instr,
+ map));
+ }
+ }
AddInstruction(store);
// Drop the simulated receiver and value. Return the value.
Drop(2);
@@ -4954,11 +5767,11 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, expr->CompoundLoadId(), RelocInfo::kNoPosition,
+ obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(expr->CompoundLoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -4969,7 +5782,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
PushAndAdd(instr);
if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -5017,7 +5830,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
- AddInstruction(new HUseConst(old_value));
+ AddInstruction(new(zone()) HUseConst(old_value));
}
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
@@ -5138,20 +5951,13 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check) {
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, type));
- }
-
- int index = lookup->GetLocalFieldIndexFromMap(*type);
+ Handle<Map> map,
+ LookupResult* lookup) {
+ int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + type->instance_size();
+ int offset = (index * kPointerSize) + map->instance_size();
return new(zone()) HLoadNamedField(object, true, offset);
} else {
// Non-negative property indices are in the properties array.
@@ -5161,39 +5967,62 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
}
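
The index convention above (negative indices address in-object fields, counted back from the instance size; non-negative ones address the out-of-object properties array) is worth a worked example. The numbers assume an illustrative 32-bit layout with kPointerSize = 4 and FixedArray::kHeaderSize = 8:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;    // 32-bit build for the example
      const int instance_size = 16;  // map->instance_size() analogue
      const int header_size = 8;     // FixedArray::kHeaderSize analogue

      int in_object_index = -2;      // second in-object field from the end
      int out_of_object_index = 3;   // fourth slot in the properties array

      printf("in-object offset: %d\n",
             in_object_index * kPointerSize + instance_size);    // 8
      printf("properties-array offset: %d\n",
             out_of_object_index * kPointerSize + header_size);  // 20
    }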
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
+HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
+ Handle<String> name,
Property* expr) {
if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
}
- ASSERT(expr->key()->IsPropertyName());
- Handle<Object> name = expr->key()->AsLiteral()->handle();
HValue* context = environment()->LookupContext();
- return new(zone()) HLoadNamedGeneric(context, obj, name);
+ return new(zone()) HLoadNamedGeneric(context, object, name);
}
-HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
- Property* expr,
- Handle<Map> map,
- Handle<String> name) {
+HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder) {
+ AddCheckConstantFunction(holder, object, map);
+ AddInstruction(new(zone()) HPushArgument(object));
+ return new(zone()) HCallConstantFunction(getter, 1);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map) {
+ // Handle a load from a known field.
+ ASSERT(!map->is_dictionary_map());
LookupResult lookup(isolate());
- map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsFound() && lookup.type() == FIELD) {
- return BuildLoadNamedField(obj,
- expr,
- map,
- &lookup,
- true);
- } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- AddInstruction(HCheckMaps::NewWithTransitions(obj, map));
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsField()) {
+ AddCheckMapsWithTransitions(object, map);
+ return BuildLoadNamedField(object, map, &lookup);
+ }
+
+ // Handle a load of a known constant function.
+ if (lookup.IsConstantFunction()) {
+ AddCheckMapsWithTransitions(object, map);
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
- } else {
- return BuildLoadNamedGeneric(obj, expr);
}
+
+ // Handle a load from a known field somewhere in the prototype chain.
+ LookupInPrototypes(map, name, &lookup);
+ if (lookup.IsField()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()));
+ Handle<JSObject> holder(lookup.holder());
+ Handle<Map> holder_map(holder->map());
+ AddCheckMapsWithTransitions(object, map);
+ HInstruction* holder_value =
+ AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder));
+ return BuildLoadNamedField(holder_value, holder_map, &lookup);
+ }
+
+ // No luck, do a generic load.
+ return BuildLoadNamedGeneric(object, name, expr);
}
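
The fallback ladder above, stated on its own: an own field, then a known constant function, then a field found on the prototype chain, and only then the generic IC load. A compact sketch (enum values and the toy map are illustrative):

    #include <cstdio>

    enum Kind { NONE, FIELD, CONSTANT_FUNCTION };

    struct FakeMap {
      Kind own;            // what the receiver's map describes directly
      Kind in_prototypes;  // what a prototype-chain walk would find
    };

    static const char* LoadStrategy(const FakeMap& map) {
      if (map.own == FIELD) return "inlined field load";
      if (map.own == CONSTANT_FUNCTION) return "embed constant function";
      if (map.in_prototypes == FIELD) return "field load from prototype holder";
      return "generic load";
    }

    int main() {
      printf("%s\n", LoadStrategy({NONE, FIELD}));  // field load from prototype holder
      printf("%s\n", LoadStrategy({NONE, NONE}));   // generic load
    }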
@@ -5208,6 +6037,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
@@ -5235,20 +6065,31 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
- return new(zone()) HStoreKeyedSpecializedArrayElement(
- external_elements, checked_key, val, elements_kind);
+ return new(zone()) HStoreKeyed(external_elements,
+ checked_key,
+ val,
+ elements_kind);
} else {
ASSERT(val == NULL);
- return new(zone()) HLoadKeyedSpecializedArrayElement(
- external_elements, checked_key, elements_kind);
+ HLoadKeyed* load =
+ new(zone()) HLoadKeyed(
+ external_elements, checked_key, dependency, elements_kind);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
}
}
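+
+// Note on the uint32 case above: a load from a Uint32Array (a hypothetical
+// example) can yield values up to 2^32 - 1, which do not fit in a signed
+// 32-bit representation. Recording the HLoadKeyed lets the later uint32
+// analysis decide whether the value may stay uint32 or must be converted,
+// e.g. to double.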
@@ -5256,20 +6097,22 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* load_dependency,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
ASSERT(val != NULL);
switch (elements_kind) {
- case FAST_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyedFastDoubleElement(
- elements, checked_key, val);
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(val));
// Fall through.
case FAST_ELEMENTS:
- return new(zone()) HStoreKeyedFastElement(
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return new(zone()) HStoreKeyed(
elements, checked_key, val, elements_kind);
default:
UNREACHABLE();
@@ -5277,57 +6120,144 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
}
}
// It's an element load (!is_store).
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
- } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
- return new(zone()) HLoadKeyedFastElement(elements, checked_key);
- }
+ return new(zone()) HLoadKeyed(elements,
+ checked_key,
+ load_dependency,
+ elements_kind);
}
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
+ HValue* dependency,
Handle<Map> map,
bool is_store) {
- HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map));
- bool fast_smi_only_elements = map->has_fast_smi_only_elements();
- bool fast_elements = map->has_fast_elements();
- HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+ HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
+ zone(), dependency);
+ AddInstruction(mapcheck);
+ if (dependency) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ return BuildUncheckedMonomorphicElementAccess(object, key, val,
+ mapcheck, map, is_store);
+}
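+
+// When `dependency` is a preceding HTransitionElementsKind, the map check is
+// anchored on it, so the kDependsOnElementsKind GVN flag can be dropped: the
+// dependency itself already orders the check after any elements-kind change.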
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ Handle<Map> map,
+ bool is_store) {
+ // No GVNFlag is necessary for ElementsKind if there is an explicit
+ // dependency on an HTransitionElementsKind instruction. The flag can also be
+ // removed if the map to check has FAST_HOLEY_ELEMENTS, since there can be no
+ // further ElementsKind transitions. Finally, the dependency can be removed
+ // for stores of FAST_ELEMENTS, since a transition to HOLEY elements won't
+ // change the generated store code.
+ if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
+ (map->elements_kind() == FAST_ELEMENTS && is_store)) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ bool fast_smi_only_elements = map->has_fast_smi_elements();
+ bool fast_elements = map->has_fast_object_elements();
+ HInstruction* elements =
+ AddInstruction(new(zone()) HLoadElements(object, mapcheck));
if (is_store && (fast_elements || fast_smi_only_elements)) {
- AddInstruction(new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map()));
+ HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
+ elements, isolate()->factory()->fixed_array_map(), zone());
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ AddInstruction(check_cow_map);
}
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
if (map->has_external_array_elements()) {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
HLoadExternalArrayPointer* external_elements =
new(zone()) HLoadExternalArrayPointer(elements);
AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(external_elements, checked_key,
- val, map->elements_kind(), is_store);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ map->elements_kind(), is_store);
}
ASSERT(fast_smi_only_elements ||
fast_elements ||
map->has_fast_double_elements());
if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
+ length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
+ HType::Smi()));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- return BuildFastElementAccess(elements, checked_key, val,
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
map->elements_kind(), is_store);
}
+HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ SmallMapList* maps) {
+ // For polymorphic loads of similar elements kinds (i.e. all tagged or all
+ // double), always use the "worst case" code without a transition. This is
+ // much faster than transitioning the elements to the worst case, trading
+ // an HTransitionElementsKind for an HCheckMaps, and avoiding mutation of
+ // the array.
+ bool has_double_maps = false;
+ bool has_smi_or_object_maps = false;
+ bool has_js_array_access = false;
+ bool has_non_js_array_access = false;
+ Handle<Map> most_general_consolidated_map;
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ // Don't allow mixing of JSArrays with JSObjects.
+ if (map->instance_type() == JS_ARRAY_TYPE) {
+ if (has_non_js_array_access) return NULL;
+ has_js_array_access = true;
+ } else if (has_js_array_access) {
+ return NULL;
+ } else {
+ has_non_js_array_access = true;
+ }
+ // Don't allow mixed, incompatible elements kinds.
+ if (map->has_fast_double_elements()) {
+ if (has_smi_or_object_maps) return NULL;
+ has_double_maps = true;
+ } else if (map->has_fast_smi_or_object_elements()) {
+ if (has_double_maps) return NULL;
+ has_smi_or_object_maps = true;
+ } else {
+ return NULL;
+ }
+ // Remember the most general elements kind; the code for its load will
+ // properly handle all of the more specific cases.
+ if ((i == 0) || IsMoreGeneralElementsKindTransition(
+ most_general_consolidated_map->elements_kind(),
+ map->elements_kind())) {
+ most_general_consolidated_map = map;
+ }
+ }
+ if (!has_double_maps && !has_smi_or_object_maps) return NULL;
+
+ HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
+ AddInstruction(check_maps);
+ HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
+ object, key, val, check_maps, most_general_consolidated_map, false);
+ return instr;
+}
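+
+// Hypothetical example: a load site that has seen both FAST_SMI_ELEMENTS and
+// FAST_HOLEY_ELEMENTS arrays is consolidated into one access generated for
+// the more general FAST_HOLEY_ELEMENTS kind, guarded by a single HCheckMaps
+// over both maps, instead of transitioning arrays or branching per kind.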
+
+
HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Expression* prop,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects) {
@@ -5336,6 +6266,19 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
SmallMapList* maps = prop->GetReceiverTypes();
bool todo_external_array = false;
+ if (!is_store) {
+ HInstruction* consolidated_load =
+ TryBuildConsolidatedElementLoad(object, key, val, maps);
+ if (consolidated_load != NULL) {
+ AddInstruction(consolidated_load);
+ *has_side_effects |= consolidated_load->HasObservableSideEffects();
+ if (position != RelocInfo::kNoPosition) {
+ consolidated_load->set_position(position);
+ }
+ return consolidated_load;
+ }
+ }
+
static const int kNumElementTypes = kElementsKindCount;
bool type_todo[kNumElementTypes];
for (int i = 0; i < kNumElementTypes; ++i) {
@@ -5349,8 +6292,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
- if (elements_kind == FAST_DOUBLE_ELEMENTS ||
- elements_kind == FAST_ELEMENTS) {
+ if (IsFastElementsKind(elements_kind) &&
+ elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
}
@@ -5364,15 +6307,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
int num_untransitionable_maps = 0;
Handle<Map> untransitionable_map;
+ HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
- AddInstruction(new(zone()) HTransitionElementsKind(
- object, map, transition_target.at(i)));
+ ASSERT(Map::IsValidElementsTransition(
+ map->elements_kind(),
+ transition_target.at(i)->elements_kind()));
+ transition = new(zone()) HTransitionElementsKind(
+ object, map, transition_target.at(i));
+ AddInstruction(transition);
} else {
type_todo[map->elements_kind()] = true;
- if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+ if (IsExternalArrayElementsKind(map->elements_kind())) {
todo_external_array = true;
}
num_untransitionable_maps++;
@@ -5389,37 +6337,36 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
: BuildLoadKeyedGeneric(object, key));
} else {
instr = AddInstruction(BuildMonomorphicElementAccess(
- object, key, val, untransitionable_map, is_store));
+ object, key, val, transition, untransitionable_map, is_store));
}
*has_side_effects |= instr->HasObservableSideEffects();
- instr->set_position(position);
+ if (position != RelocInfo::kNoPosition) instr->set_position(position);
return is_store ? NULL : instr;
}
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
+ HInstruction* checkspec =
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
- HCompareConstantEqAndBranch* elements_kind_branch = NULL;
- HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+ HInstruction* elements =
+ AddInstruction(new(zone()) HLoadElements(object, checkspec));
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
- // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
- // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
- // arrays.
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
+ // are handled before external arrays.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
- // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
- // that's executed for all external array cases.
+ // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
+ // code that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -5433,21 +6380,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if (type_todo[elements_kind]) {
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- elements_kind_branch = new(zone()) HCompareConstantEqAndBranch(
- elements_kind_instr, elements_kind, Token::EQ_STRICT);
+ HCompareConstantEqAndBranch* elements_kind_branch =
+ new(zone()) HCompareConstantEqAndBranch(
+ elements_kind_instr, elements_kind, Token::EQ_STRICT);
elements_kind_branch->SetSuccessorAt(0, if_true);
elements_kind_branch->SetSuccessorAt(1, if_false);
current_block()->Finish(elements_kind_branch);
set_current_block(if_true);
HInstruction* access;
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_DOUBLE_ELEMENTS) {
- if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
+ if (IsFastElementsKind(elements_kind)) {
+ if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(),
- elements_kind_branch));
+ zone(), elements_kind_branch));
}
// TODO(jkummerow): The need for these two blocks could be avoided
// in one of two ways:
@@ -5467,10 +6413,13 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_jsarray);
HInstruction* length;
- length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
+ HType::Smi()));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
if (!is_store) {
Push(access);
}
@@ -5483,9 +6432,11 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -5494,10 +6445,11 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind, is_store));
+ external_elements, checked_key, val, elements_kind_branch,
+ elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
- access->set_position(position);
+ if (position != RelocInfo::kNoPosition) access->set_position(position);
if (!is_store) {
Push(access);
}
@@ -5518,7 +6470,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
HValue* key,
HValue* val,
Expression* expr,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects) {
@@ -5531,7 +6483,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
: BuildLoadKeyedGeneric(obj, key);
} else {
AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+ instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
}
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
@@ -5544,7 +6496,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
instr = BuildLoadKeyedGeneric(obj, key);
}
}
- instr->set_position(position);
+ if (position != RelocInfo::kNoPosition) instr->set_position(position);
AddInstruction(instr);
*has_side_effects = instr->HasObservableSideEffects();
return instr;
@@ -5572,6 +6524,7 @@ void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Push arguments when entering inlined function.
HEnterInlined* entry = function_state()->entry();
+ entry->set_arguments_pushed();
ZoneList<HValue*>* arguments_values = entry->arguments_values();
@@ -5654,7 +6607,7 @@ void HGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
if (TryArgumentsAccess(expr)) return;
@@ -5665,13 +6618,12 @@ void HGraphBuilder::VisitProperty(Property* expr) {
HValue* array = Pop();
AddInstruction(new(zone()) HCheckNonSmi(array));
HInstruction* mapcheck =
- AddInstruction(HCheckInstanceType::NewIsJSArray(array));
+ AddInstruction(HCheckInstanceType::NewIsJSArray(array, zone()));
instr = new(zone()) HJSArrayLength(array, mapcheck);
-
} else if (expr->IsStringLength()) {
HValue* string = Pop();
AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = new(zone()) HStringLength(string);
} else if (expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
@@ -5692,15 +6644,27 @@ void HGraphBuilder::VisitProperty(Property* expr) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
SmallMapList* types = expr->GetReceiverTypes();
- HValue* obj = Pop();
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> map;
if (expr->IsMonomorphic()) {
- instr = BuildLoadNamed(obj, expr, types->first(), name);
+ map = types->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ AddCheckConstantFunction(holder, Top(), map);
+ if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
+ AddInstruction(new(zone()) HPushArgument(Pop()));
+ instr = new(zone()) HCallConstantFunction(getter, 1);
+ } else {
+ instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
+ }
} else if (types != NULL && types->length() > 1) {
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- HandlePolymorphicLoadNamedField(expr, obj, types, name);
- return;
+ return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
} else {
- instr = BuildLoadNamedGeneric(obj, expr);
+ instr = BuildLoadNamedGeneric(Pop(), name, expr);
}
} else {
@@ -5730,22 +6694,23 @@ void HGraphBuilder::VisitProperty(Property* expr) {
}
-void HGraphBuilder::AddCheckConstantFunction(Call* expr,
+void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map) {
+ if (!holder.is_null()) {
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
+ }
+}
+
+
+void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check) {
+ Handle<Map> receiver_map) {
// Constant functions have the nice property that the map will change if they
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
- if (smi_and_map_check) {
- AddInstruction(new(zone()) HCheckNonSmi(receiver));
- AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map));
- }
- if (!expr->holder().is_null()) {
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
- expr->holder()));
- }
+ AddCheckMapsWithTransitions(receiver, receiver_map);
+ AddCheckPrototypeMaps(holder, receiver_map);
}
@@ -5827,7 +6792,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
set_current_block(if_true);
expr->ComputeTarget(map, name);
- AddCheckConstantFunction(expr, receiver, map, false);
+ AddCheckPrototypeMaps(expr->holder(), map);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
Handle<JSFunction> caller = info()->closure();
SmartArrayPointer<char> caller_name =
@@ -5941,11 +6906,11 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
- HValue* receiver,
- int ast_id,
- int return_id,
- ReturnHandlingFlag return_handling) {
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -6002,13 +6967,13 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
}
// Parse and allocate variables.
- CompilationInfo target_info(target);
+ CompilationInfo target_info(target, zone());
if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
!Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
- target_shared->DisableOptimization();
+ target_shared->DisableOptimization("parse/scope error");
}
TraceInline(target, caller, "parse failure");
return false;
@@ -6074,7 +7039,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(target_info.scope());
+ ScopeInfo::Create(target_info.scope(), zone());
target_shared->set_scope_info(*target_scope_info);
}
target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -6090,31 +7055,34 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Save the pending call context and type feedback oracle. Set up new ones
// for the inlined function.
ASSERT(target_shared->has_deoptimization_support());
+ Handle<Code> unoptimized_code(target_shared->code());
TypeFeedbackOracle target_oracle(
- Handle<Code>(target_shared->code()),
- Handle<Context>(target->context()->global_context()),
- isolate());
+ unoptimized_code,
+ Handle<Context>(target->context()->native_context()),
+ isolate(),
+ zone());
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, &target_oracle, return_handling);
+ this, &target_info, &target_oracle, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
- arguments->length(),
+ arguments_count,
function,
undefined,
call_kind,
- function_state()->is_construct());
+ function_state()->inlining_kind());
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
- HConstant* context = new HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
+ HConstant* context =
+ new(zone()) HConstant(Handle<Context>(target->context()),
+ Representation::Tagged());
AddInstruction(context);
inner_env->BindContext(context);
#endif
@@ -6129,18 +7097,18 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
if (function->scope()->arguments() != NULL) {
HEnvironment* arguments_env = inner_env->arguments_environment();
int arguments_count = arguments_env->parameter_count();
- arguments_values = new(zone()) ZoneList<HValue*>(arguments_count);
+ arguments_values = new(zone()) ZoneList<HValue*>(arguments_count, zone());
for (int i = 0; i < arguments_count; i++) {
- arguments_values->Add(arguments_env->Lookup(i));
+ arguments_values->Add(arguments_env->Lookup(i), zone());
}
}
HEnterInlined* enter_inlined =
new(zone()) HEnterInlined(target,
- arguments->length(),
+ arguments_count,
function,
call_kind,
- function_state()->is_construct(),
+ function_state()->inlining_kind(),
function->scope()->arguments(),
arguments_values);
function_state()->set_entry(enter_inlined);
@@ -6160,7 +7128,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Bail out if the inline function did, as we cannot residualize a call
// instead.
TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization();
+ target_shared->DisableOptimization("inlining bailed out");
inline_bailout_ = true;
delete target_state;
return true;
@@ -6169,30 +7137,51 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Update inlined nodes count.
inlined_count_ += nodes_added;
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
+ Handle<TypeFeedbackInfo> type_info(
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ graph()->update_type_change_checksum(type_info->own_type_change_checksum());
+
TraceInline(target, caller, NULL);
if (current_block() != NULL) {
- // Add default return value (i.e. undefined for normals calls or the newly
- // allocated receiver for construct calls) if control can fall off the
- // body. In a test context, undefined is false and any JSObject is true.
- if (call_context()->IsValue()) {
- ASSERT(function_return() != NULL);
- HValue* return_value = function_state()->is_construct()
- ? receiver
- : undefined;
- current_block()->AddLeaveInlined(return_value,
- function_return(),
- function_state());
- } else if (call_context()->IsEffect()) {
- ASSERT(function_return() != NULL);
- current_block()->Goto(function_return(), function_state());
+ FunctionState* state = function_state();
+ if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+ // Falling off the end of an inlined construct call. In a test context the
+ // return value will always evaluate to true; in a value context the
+ // return value is the newly allocated receiver.
+ if (call_context()->IsTest()) {
+ current_block()->Goto(inlined_test_context()->if_true(), state);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(implicit_return_value, state);
+ }
+ } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+ // Falling off the end of an inlined setter call. The returned value is
+ // never used; the value of an assignment is always the value of the RHS
+ // of the assignment.
+ if (call_context()->IsTest()) {
+ inlined_test_context()->ReturnValue(implicit_return_value);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(implicit_return_value, state);
+ }
} else {
- ASSERT(call_context()->IsTest());
- ASSERT(inlined_test_context() != NULL);
- HBasicBlock* target = function_state()->is_construct()
- ? inlined_test_context()->if_true()
- : inlined_test_context()->if_false();
- current_block()->Goto(target, function_state());
+ // Falling off the end of a normal inlined function. This basically means
+ // returning undefined.
+ if (call_context()->IsTest()) {
+ current_block()->Goto(inlined_test_context()->if_false(), state);
+ } else if (call_context()->IsEffect()) {
+ current_block()->Goto(function_return(), state);
+ } else {
+ ASSERT(call_context()->IsValue());
+ current_block()->AddLeaveInlined(undefined, state);
+ }
}
}
@@ -6240,7 +7229,7 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
return TryInline(call_kind,
expr->target(),
- expr->arguments(),
+ expr->arguments()->length(),
NULL,
expr->id(),
expr->ReturnId(),
@@ -6248,17 +7237,43 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
+bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
- expr->arguments(),
- receiver,
+ expr->arguments()->length(),
+ implicit_return_value,
expr->id(),
expr->ReturnId(),
CONSTRUCT_CALL_RETURN);
}
+bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Property* prop) {
+ return TryInline(CALL_AS_METHOD,
+ getter,
+ 0,
+ NULL,
+ prop->id(),
+ prop->LoadId(),
+ GETTER_CALL_RETURN);
+}
+
+
+bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value) {
+ return TryInline(CALL_AS_METHOD,
+ setter,
+ 1,
+ implicit_return_value,
+ assignment->id(),
+ assignment->AssignmentId(),
+ SETTER_CALL_RETURN);
+}
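+
+// Note the argument counts above: an inlined getter takes no explicit
+// arguments (only the receiver), while an inlined setter takes exactly one,
+// the assigned value, which also serves as the implicit return value because
+// an assignment always evaluates to its right-hand side.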
+
+
bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
@@ -6332,7 +7347,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathCos:
case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
@@ -6345,7 +7360,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathPow:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
@@ -6387,7 +7402,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathRandom:
if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
Drop(1); // Receiver.
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -6400,82 +7415,15 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* right = Pop();
HValue* left = Pop();
- Pop(); // Pop receiver.
-
- HValue* left_operand = left;
- HValue* right_operand = right;
-
- // If we do not have two integers, we convert to double for comparison.
- if (!left->representation().IsInteger32() ||
- !right->representation().IsInteger32()) {
- if (!left->representation().IsDouble()) {
- HChange* left_convert = new(zone()) HChange(
- left,
- Representation::Double(),
- false, // Do not truncate when converting to double.
- true); // Deoptimize for undefined.
- left_convert->SetFlag(HValue::kBailoutOnMinusZero);
- left_operand = AddInstruction(left_convert);
- }
- if (!right->representation().IsDouble()) {
- HChange* right_convert = new(zone()) HChange(
- right,
- Representation::Double(),
- false, // Do not truncate when converting to double.
- true); // Deoptimize for undefined.
- right_convert->SetFlag(HValue::kBailoutOnMinusZero);
- right_operand = AddInstruction(right_convert);
- }
- }
-
- ASSERT(left_operand->representation().Equals(
- right_operand->representation()));
- ASSERT(!left_operand->representation().IsTagged());
-
- Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
-
- HCompareIDAndBranch* compare =
- new(zone()) HCompareIDAndBranch(left_operand, right_operand, op);
- compare->SetInputRepresentation(left_operand->representation());
-
- HBasicBlock* return_left = graph()->CreateBasicBlock();
- HBasicBlock* return_right = graph()->CreateBasicBlock();
-
- compare->SetSuccessorAt(0, return_left);
- compare->SetSuccessorAt(1, return_right);
- current_block()->Finish(compare);
-
- set_current_block(return_left);
- Push(left);
- set_current_block(return_right);
- // The branch above always returns the right operand if either of
- // them is NaN, but the spec requires that max/min(NaN, X) = NaN.
- // We add another branch that checks if the left operand is NaN or not.
- if (left_operand->representation().IsDouble()) {
- // If left_operand != left_operand then it is NaN.
- HCompareIDAndBranch* compare_nan = new(zone()) HCompareIDAndBranch(
- left_operand, left_operand, Token::EQ);
- compare_nan->SetInputRepresentation(left_operand->representation());
- HBasicBlock* left_is_number = graph()->CreateBasicBlock();
- HBasicBlock* left_is_nan = graph()->CreateBasicBlock();
- compare_nan->SetSuccessorAt(0, left_is_number);
- compare_nan->SetSuccessorAt(1, left_is_nan);
- current_block()->Finish(compare_nan);
- set_current_block(left_is_nan);
- Push(left);
- set_current_block(left_is_number);
- Push(right);
- return_right = CreateJoin(left_is_number, left_is_nan, expr->id());
- } else {
- Push(right);
- }
-
- HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
- set_current_block(join);
- ast_context()->ReturnValue(Pop());
+ Drop(1); // Receiver.
+ HValue* context = environment()->LookupContext();
+ HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
+ : HMathMinMax::kMathMax;
+ HMathMinMax* result = new(zone()) HMathMinMax(context, left, right, op);
+ ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
@@ -6516,7 +7464,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
VisitForValue(prop->obj());
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* function = Top();
- AddCheckConstantFunction(expr, function, function_map, true);
+ AddCheckConstantFunction(expr->holder(), function, function_map);
Drop(1);
VisitForValue(args->at(0));
@@ -6635,7 +7583,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
call = PreProcessCall(
new(zone()) HCallNamed(context, name, argument_count));
} else {
- AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
@@ -6706,6 +7654,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
}
if (TryInlineCall(expr)) return;
+
+ if (expr->target().is_identical_to(info()->closure())) {
+ graph()->MarkRecursive();
+ }
+
call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
argument_count));
} else {
@@ -6728,8 +7681,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global = new(zone()) HGlobalObject(context);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
AddInstruction(global);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
@@ -6759,8 +7712,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(global_object);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(receiver);
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
@@ -6833,8 +7786,8 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
- HValue* constructor = NULL;
- CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitArgument(expr->expression()));
+ HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HInstruction* call =
new(zone()) HCallNew(context, constructor, argument_count);
@@ -6892,7 +7845,6 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
int argument_count = expr->arguments()->length();
HCallRuntime* call =
new(zone()) HCallRuntime(context, name, function, argument_count);
- call->set_position(RelocInfo::kNoPosition);
Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -7147,8 +8099,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot::Mode mode =
- (var->mode() == LET || var->mode() == CONST_HARMONY)
+ HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
HStoreContextSlot* instr =
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
@@ -7166,30 +8117,56 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
} else {
// Argument of the count operation is a property.
ASSERT(prop != NULL);
- prop->RecordTypeFeedback(oracle());
+ prop->RecordTypeFeedback(oracle(), zone());
if (prop->key()->IsPropertyName()) {
// Named property.
if (returns_original_input) Push(graph_->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* obj = Top();
-
- HInstruction* load = NULL;
- if (prop->IsMonomorphic()) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map = prop->GetReceiverTypes()->first();
- load = BuildLoadNamed(obj, prop, map, name);
+ HValue* object = Top();
+
+ Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+ Handle<Map> map;
+ HInstruction* load;
+ bool monomorphic = prop->IsMonomorphic();
+ if (monomorphic) {
+ map = prop->GetReceiverTypes()->first();
+ if (map->is_dictionary_map()) monomorphic = false;
+ }
+ if (monomorphic) {
+ Handle<JSFunction> getter;
+ Handle<JSObject> holder;
+ if (LookupGetter(map, name, &getter, &holder)) {
+ load = BuildCallGetter(object, map, getter, holder);
+ } else {
+ load = BuildLoadNamedMonomorphic(object, name, prop, map);
+ }
} else {
- load = BuildLoadNamedGeneric(obj, prop);
+ load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
- HInstruction* store = BuildStoreNamed(obj, after, prop);
+ HInstruction* store;
+ if (!monomorphic) {
+ // If we don't know the monomorphic type, do a generic store.
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
+ } else {
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ store = BuildCallSetter(object, after, map, setter, holder);
+ } else {
+ CHECK_ALIVE(store = BuildStoreNamedMonomorphic(object,
+ name,
+ after,
+ map));
+ }
+ }
AddInstruction(store);
// Overwrite the receiver in the bailout environment with the result
@@ -7210,16 +8187,16 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, expr->CountId(), RelocInfo::kNoPosition,
+ obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(expr->CountId());
+ if (has_side_effects) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
- expr->RecordTypeFeedback(oracle());
+ expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
RelocInfo::kNoPosition,
true, // is_store
@@ -7245,7 +8222,7 @@ HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
HValue* string,
HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
- AddInstruction(HCheckInstanceType::NewIsString(string));
+ AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HStringLength* length = new(zone()) HStringLength(string);
AddInstruction(length);
HInstruction* checked_index =
@@ -7253,6 +8230,61 @@ HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
+// Checks if the given shift amounts have the form (sa) and (32 - sa).
+static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
+ HValue* const32_minus_sa) {
+ if (!const32_minus_sa->IsSub()) return false;
+ HSub* sub = HSub::cast(const32_minus_sa);
+ HValue* const32 = sub->left();
+ if (!const32->IsConstant() ||
+ HConstant::cast(const32)->Integer32Value() != 32) {
+ return false;
+ }
+ return (sub->right() == sa);
+}
+
+
+// Checks whether the left and the right operands are shift instructions with
+// opposite directions that can be replaced by one rotate-right instruction.
+// If so, returns the operand and the shift amount for the rotate instruction
+// via the out parameters.
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
+ HShl* shl;
+ HShr* shr;
+ if (left->IsShl() && right->IsShr()) {
+ shl = HShl::cast(left);
+ shr = HShr::cast(right);
+ } else if (left->IsShr() && right->IsShl()) {
+ shl = HShl::cast(right);
+ shr = HShr::cast(left);
+ } else {
+ return false;
+ }
+
+ if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
+ !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
+ return false;
+ }
+ *operand = shr->left();
+ *shift_amount = shr->right();
+ return true;
+}
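+
+// Illustrative example: for a 32-bit value x and shift amount sa, the
+// JavaScript expression (x >>> sa) | (x << (32 - sa)) matches this pattern
+// and can be emitted as a single rotate right of x by sa.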
+
+
+bool CanBeZero(HValue* right) {
+ if (right->IsConstant()) {
+ HConstant* right_const = HConstant::cast(right);
+ if (right_const->HasInteger32Value() &&
+ (right_const->Integer32Value() & 0x1f) != 0) {
+ return false;
+ }
+ }
+ return true;
+}
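+
+// Used below to guard the uint32 handling of Token::SHR: a shift like
+// `x >>> 3` (constant amount with nonzero low five bits) always fits in
+// int32, whereas `x >>> y` or `x >>> 0` may produce a full uint32 value and
+// so must be recorded for the uint32 analysis.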
+
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
@@ -7269,9 +8301,9 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
case Token::ADD:
if (info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsString(left));
+ AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsString(right));
+ AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
instr = new(zone()) HStringAdd(context, left, right);
} else {
instr = HAdd::NewHAdd(zone(), context, left, right);
@@ -7291,14 +8323,27 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- case Token::BIT_OR:
instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (info.IsInteger32() &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = new(zone()) HRor(context, operand, shift_amount);
+ } else {
+ instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
+ }
+ break;
+ }
case Token::SAR:
instr = HSar::NewHSar(zone(), context, left, right);
break;
case Token::SHR:
instr = HShr::NewHShr(zone(), context, left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
+ }
break;
case Token::SHL:
instr = HShl::NewHShl(zone(), context, left, right);
@@ -7311,14 +8356,16 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
// for a smi operation. If one of the operands is a constant string
// do not generate code assuming it is a smi operation.
if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->HasStringValue()) ||
- (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
return instr;
}
Representation rep = ToRepresentation(info);
// We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
- rep = Representation::Integer32();
+ if (instr->IsBitwiseBinaryOperation()) {
+ HBitwiseBinaryOperation::cast(instr)->
+ InitializeObservedInputRepresentation(rep);
+ if (rep.IsDouble()) rep = Representation::Integer32();
}
TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
@@ -7395,7 +8442,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- unsigned test_id = expr->left()->test_id();
+ TypeFeedbackId test_id = expr->left()->test_id();
ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = is_logical_and
? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
@@ -7528,7 +8575,7 @@ static bool MatchLiteralCompareTypeof(HValue* left,
if (left->IsTypeof() &&
Token::IsEqualityOp(op) &&
right->IsConstant() &&
- HConstant::cast(right)->HasStringValue()) {
+ HConstant::cast(right)->handle()->IsString()) {
*typeof_expr = HTypeof::cast(left);
*check = Handle<String>::cast(HConstant::cast(right)->handle());
return true;
@@ -7634,9 +8681,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
- if (lookup.IsFound() &&
- lookup.type() == NORMAL &&
- lookup.GetValue()->IsJSFunction()) {
+ if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
@@ -7670,19 +8715,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Can we get away with map check and not instance type check?
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckMaps::NewWithTransitions(left, map));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckMaps::NewWithTransitions(right, map));
+ AddCheckMapsWithTransitions(left, map);
+ AddCheckMapsWithTransitions(right, map);
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@@ -7695,9 +8738,9 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@@ -7734,13 +8777,25 @@ void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
}
+HInstruction* HGraphBuilder::BuildThisFunction() {
+ // If we share optimized code between different closures, the
+ // this-function is not a constant, except inside an inlined body.
+ if (function_state()->outer() != NULL) {
+ return new(zone()) HConstant(
+ function_state()->compilation_info()->closure(),
+ Representation::Tagged());
+ } else {
+ return new(zone()) HThisFunction;
+ }
+}
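+
+// For example, when f is inlined into g, `this function` inside f's body is
+// the known closure being inlined and can be embedded as an HConstant; in
+// g's own, possibly shared, optimized code it must be loaded at run time via
+// HThisFunction.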
+
+
void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HThisFunction* self = new(zone()) HThisFunction(
- function_state()->compilation_info()->closure());
- return ast_context()->ReturnInstruction(self, expr->id());
+ HInstruction* instr = BuildThisFunction();
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7769,11 +8824,11 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_.Add(variable->name());
+ globals_.Add(variable->name(), zone());
globals_.Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ : isolate()->factory()->undefined_value(), zone());
+ globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
return;
case Variable::PARAMETER:
case Variable::LOCAL:
@@ -7786,7 +8841,7 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
if (hole_init) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new HStoreContextSlot(
+ HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
@@ -7803,13 +8858,13 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_.Add(variable->name());
+ globals_.Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_.Add(function);
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_.Add(function, zone());
+ globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
return;
}
case Variable::PARAMETER:
@@ -7823,7 +8878,7 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
CHECK_ALIVE(VisitForValue(declaration->fun()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store = new HStoreContextSlot(
+ HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
@@ -7969,7 +9024,7 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function.
- HValue* value = function_state()->is_construct()
+ HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
? graph()->GetConstantTrue()
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
@@ -8005,8 +9060,10 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
HInstruction* elements = AddInstruction(
new(zone()) HArgumentsElements(false));
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
+ HInstruction* checked_index =
+ AddInstruction(new(zone()) HBoundsCheck(index, length));
HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, index);
+ new(zone()) HAccessArgumentsAt(elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8069,11 +9126,11 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
Handle<String> name = isolate()->factory()->undefined_symbol();
- AddInstruction(new HStoreNamedField(object,
- name,
- value,
- true, // in-object store.
- JSValue::kValueOffset));
+ AddInstruction(new(zone()) HStoreNamedField(object,
+ name,
+ value,
+ true, // in-object store.
+ JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());
set_current_block(join);
@@ -8361,33 +9418,38 @@ void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
HEnvironment::HEnvironment(HEnvironment* outer,
Scope* scope,
- Handle<JSFunction> closure)
+ Handle<JSFunction> closure,
+ Zone* zone)
: closure_(closure),
- values_(0),
- assigned_variables_(4),
+ values_(0, zone),
+ assigned_variables_(4, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
local_count_(0),
outer_(outer),
+ entry_(NULL),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber) {
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
-HEnvironment::HEnvironment(const HEnvironment* other)
- : values_(0),
- assigned_variables_(0),
+HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
+ : values_(0, zone),
+ assigned_variables_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
local_count_(0),
outer_(NULL),
+ entry_(NULL),
pop_count_(0),
push_count_(0),
- ast_id_(other->ast_id()) {
+ ast_id_(other->ast_id()),
+ zone_(zone) {
Initialize(other);
}
@@ -8395,17 +9457,20 @@ HEnvironment::HEnvironment(const HEnvironment* other)
HEnvironment::HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
- int arguments)
+ int arguments,
+ Zone* zone)
: closure_(closure),
- values_(arguments),
- assigned_variables_(0),
+ values_(arguments, zone),
+ assigned_variables_(0, zone),
frame_type_(frame_type),
parameter_count_(arguments),
local_count_(0),
outer_(outer),
+ entry_(NULL),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber) {
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
}
@@ -8417,19 +9482,20 @@ void HEnvironment::Initialize(int parameter_count,
// Avoid reallocating the temporaries' backing store on the first Push.
int total = parameter_count + specials_count_ + local_count + stack_height;
- values_.Initialize(total + 4);
- for (int i = 0; i < total; ++i) values_.Add(NULL);
+ values_.Initialize(total + 4, zone());
+ for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
}
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
- values_.AddAll(other->values_);
- assigned_variables_.AddAll(other->assigned_variables_);
+ values_.AddAll(other->values_, zone());
+ assigned_variables_.AddAll(other->assigned_variables_, zone());
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
+ entry_ = other->entry_;
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
@@ -8453,7 +9519,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
} else if (values_[i] != other->values_[i]) {
// There is a fresh value on the incoming edge, a phi is needed.
ASSERT(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = new(block->zone()) HPhi(i);
+ HPhi* phi = new(zone()) HPhi(i, zone());
HValue* old_value = values_[i];
for (int j = 0; j < block->predecessors()->length(); j++) {
phi->AddInput(old_value);
@@ -8469,7 +9535,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
if (!assigned_variables_.Contains(index)) {
- assigned_variables_.Add(index);
+ assigned_variables_.Add(index, zone());
}
values_[index] = value;
}
@@ -8509,7 +9575,7 @@ void HEnvironment::Drop(int count) {
HEnvironment* HEnvironment::Copy() const {
- return new(closure()->GetIsolate()->zone()) HEnvironment(this);
+ return new(zone()) HEnvironment(this, zone());
}
@@ -8523,7 +9589,7 @@ HEnvironment* HEnvironment::CopyWithoutHistory() const {
HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* new_env = Copy();
for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = new(loop_header->zone()) HPhi(i);
+ HPhi* phi = new(zone()) HPhi(i, zone());
phi->AddInput(values_[i]);
new_env->values_[i] = phi;
loop_header->AddPhi(phi);
@@ -8537,8 +9603,9 @@ HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
Handle<JSFunction> target,
FrameType frame_type,
int arguments) const {
- HEnvironment* new_env = new(closure()->GetIsolate()->zone())
- HEnvironment(outer, target, frame_type, arguments + 1);
+ HEnvironment* new_env =
+ new(zone()) HEnvironment(outer, target, frame_type,
+ arguments + 1, zone());
for (int i = 0; i <= arguments; ++i) { // Include receiver.
new_env->Push(ExpressionStackAt(arguments - i));
}
@@ -8553,11 +9620,9 @@ HEnvironment* HEnvironment::CopyForInlining(
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind,
- bool is_construct) const {
+ InliningKind inlining_kind) const {
ASSERT(frame_type() == JS_FUNCTION);
- Zone* zone = closure()->GetIsolate()->zone();
-
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
@@ -8565,11 +9630,19 @@ HEnvironment* HEnvironment::CopyForInlining(
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
- if (is_construct) {
+ if (inlining_kind == CONSTRUCT_CALL_RETURN) {
// Create artificial constructor stub environment. The receiver should
// actually be the constructor function, but we pass the newly allocated
// object instead, DoComputeConstructStubFrame() relies on that.
outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
+ } else if (inlining_kind == GETTER_CALL_RETURN) {
+ // We need an additional StackFrame::INTERNAL frame for restoring the
+ // correct context.
+ outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
+ } else if (inlining_kind == SETTER_CALL_RETURN) {
+ // We need an additional StackFrame::INTERNAL frame for temporarily saving
+ // the argument of the setter; see StoreStubCompiler::CompileStoreViaSetter.
+ outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
}
if (arity != arguments) {
@@ -8578,7 +9651,7 @@ HEnvironment* HEnvironment::CopyForInlining(
}
HEnvironment* inner =
- new(zone) HEnvironment(outer, function->scope(), target);
+ new(zone()) HEnvironment(outer, function->scope(), target, zone());
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
HValue* push = (i <= arguments) ?
@@ -8589,7 +9662,7 @@ HEnvironment* HEnvironment::CopyForInlining(
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
if ((target->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && !is_construct) {
+ call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, LookupContext());
@@ -8597,7 +9670,7 @@ HEnvironment* HEnvironment::CopyForInlining(
inner->SetValueAt(i, undefined);
}
- inner->set_ast_id(AstNode::kFunctionEntryId);
+ inner->set_ast_id(BailoutId::FunctionEntry());
return inner;
}
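
Throughout these hunks, raw int ast ids give way to the BailoutId value type (and, in the assembler hunks below, to TypeFeedbackId), so the two id spaces can no longer be confused with plain integers. A minimal sketch of such a wrapper, assuming only the operations this diff uses (None(), FunctionEntry(), IsNone(), ToInt()); the real class in v8's utils.h may differ in detail:

    // Hypothetical sketch of an id wrapper in the style of BailoutId.
    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}
      int ToInt() const { return id_; }
      bool IsNone() const { return id_ == kNoneId; }
      bool operator==(const BailoutId& other) const { return id_ == other.id_; }
      static BailoutId None() { return BailoutId(kNoneId); }
      static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
     private:
      static const int kNoneId = -1;          // Assumed sentinel.
      static const int kFunctionEntryId = 2;  // Assumed value.
      int id_;
    };
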
@@ -8768,27 +9841,28 @@ void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
for (int i = 0; i < fixed_d->length(); ++i) {
- TraceLiveRange(fixed_d->at(i), "fixed");
+ TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
}
const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
for (int i = 0; i < fixed->length(); ++i) {
- TraceLiveRange(fixed->at(i), "fixed");
+ TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
}
const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
for (int i = 0; i < live_ranges->length(); ++i) {
- TraceLiveRange(live_ranges->at(i), "object");
+ TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
}
}
-void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
+void HTracer::TraceLiveRange(LiveRange* range, const char* type,
+ Zone* zone) {
if (range != NULL && !range->IsEmpty()) {
PrintIndent();
trace_.Add("%d %s", range->id(), type);
if (range->HasRegisterAssigned()) {
- LOperand* op = range->CreateAssignedOperand(ZONE);
+ LOperand* op = range->CreateAssignedOperand(zone);
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
diff --git a/src/3rdparty/v8/src/hydrogen.h b/src/3rdparty/v8/src/hydrogen.h
index b56a5af..3748970 100644
--- a/src/3rdparty/v8/src/hydrogen.h
+++ b/src/3rdparty/v8/src/hydrogen.h
@@ -77,7 +77,7 @@ class HBasicBlock: public ZoneObject {
return &deleted_phis_;
}
void RecordDeletedPhi(int merge_index) {
- deleted_phis_.Add(merge_index);
+ deleted_phis_.Add(merge_index, zone());
}
HBasicBlock* dominator() const { return dominator_; }
HEnvironment* last_environment() const { return last_environment_; }
@@ -118,14 +118,14 @@ class HBasicBlock: public ZoneObject {
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
- void SetJoinId(int ast_id);
+ void SetJoinId(BailoutId ast_id);
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@@ -135,9 +135,7 @@ class HBasicBlock: public ZoneObject {
// Add the inlined function exit sequence, adding an HLeaveInlined
// instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value,
- HBasicBlock* target,
- FunctionState* state = NULL);
+ void AddLeaveInlined(HValue* return_value, FunctionState* state);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@@ -158,7 +156,7 @@ class HBasicBlock: public ZoneObject {
dominates_loop_successors_ = true;
}
- inline Zone* zone();
+ inline Zone* zone() const;
#ifdef DEBUG
void Verify();
@@ -168,7 +166,7 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(int ast_id);
+ HSimulate* CreateSimulate(BailoutId ast_id);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -212,12 +210,12 @@ class HPredecessorIterator BASE_EMBEDDED {
class HLoopInformation: public ZoneObject {
public:
- explicit HLoopInformation(HBasicBlock* loop_header)
- : back_edges_(4),
+ HLoopInformation(HBasicBlock* loop_header, Zone* zone)
+ : back_edges_(4, zone),
loop_header_(loop_header),
- blocks_(8),
+ blocks_(8, zone),
stack_check_(NULL) {
- blocks_.Add(loop_header);
+ blocks_.Add(loop_header, zone);
}
virtual ~HLoopInformation() {}
@@ -247,7 +245,8 @@ class HGraph: public ZoneObject {
explicit HGraph(CompilationInfo* info);
Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() const { return zone_; }
+ CompilationInfo* info() const { return info_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -259,6 +258,7 @@ class HGraph: public ZoneObject {
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
+ void ComputeSafeUint32Operations();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void EliminateUnreachablePhis();
@@ -268,7 +268,9 @@ class HGraph: public ZoneObject {
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
+ void DeadCodeElimination();
void PropagateDeoptimizingMark();
+ void EliminateUnusedInstructions();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -280,8 +282,6 @@ class HGraph: public ZoneObject {
void CollectPhis();
- Handle<Code> Compile(CompilationInfo* info);
-
void set_undefined_constant(HConstant* constant) {
undefined_constant_.set(constant);
}
@@ -304,7 +304,7 @@ class HGraph: public ZoneObject {
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
- values_.Add(value);
+ values_.Add(value, zone());
return values_.length() - 1;
}
HValue* LookupValue(int id) const {
@@ -312,6 +312,8 @@ class HGraph: public ZoneObject {
return NULL;
}
+ bool Optimize(SmartArrayPointer<char>* bailout_reason);
+
#ifdef DEBUG
void Verify(bool do_full_verify) const;
#endif
@@ -336,17 +338,39 @@ class HGraph: public ZoneObject {
osr_values_.set(values);
}
+ int update_type_change_checksum(int delta) {
+ type_change_checksum_ += delta;
+ return type_change_checksum_;
+ }
+
+ bool use_optimistic_licm() {
+ return use_optimistic_licm_;
+ }
+
+ void set_use_optimistic_licm(bool value) {
+ use_optimistic_licm_ = value;
+ }
+
+ void MarkRecursive() {
+ is_recursive_ = true;
+ }
+
+ bool is_recursive() const {
+ return is_recursive_;
+ }
+
+ void RecordUint32Instruction(HInstruction* instr) {
+ if (uint32_instructions_ == NULL) {
+ uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
+ }
+ uint32_instructions_->Add(instr, zone());
+ }
+
private:
- void Postorder(HBasicBlock* block,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
- void PostorderLoopBlocks(HLoopInformation* loop,
- BitVector* visited,
- ZoneList<HBasicBlock*>* order,
- HBasicBlock* loop_header);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- Object* value);
+ Handle<Object> value);
+ HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
+ int32_t integer_value);
void MarkAsDeoptimizingRecursively(HBasicBlock* block);
void InsertTypeConversions(HInstruction* instr);
@@ -369,6 +393,7 @@ class HGraph: public ZoneObject {
ZoneList<HBasicBlock*> blocks_;
ZoneList<HValue*> values_;
ZoneList<HPhi*>* phi_list_;
+ ZoneList<HInstruction*>* uint32_instructions_;
SetOncePointer<HConstant> undefined_constant_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
@@ -380,29 +405,36 @@ class HGraph: public ZoneObject {
SetOncePointer<HBasicBlock> osr_loop_entry_;
SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+ CompilationInfo* info_;
+ Zone* zone_;
+
+ bool is_recursive_;
+ bool use_optimistic_licm_;
+ int type_change_checksum_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-Zone* HBasicBlock::zone() { return graph_->zone(); }
+Zone* HBasicBlock::zone() const { return graph_->zone(); }
// Type of stack frame an environment might refer to.
-enum FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };
+enum FrameType {
+ JS_FUNCTION,
+ JS_CONSTRUCT,
+ JS_GETTER,
+ JS_SETTER,
+ ARGUMENTS_ADAPTOR
+};
class HEnvironment: public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
- Handle<JSFunction> closure);
-
- HEnvironment* DiscardInlined(bool drop_extra) {
- HEnvironment* outer = outer_;
- while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
- if (drop_extra) outer->Drop(1);
- return outer;
- }
+ Handle<JSFunction> closure,
+ Zone* zone);
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
@@ -422,8 +454,11 @@ class HEnvironment: public ZoneObject {
int pop_count() const { return pop_count_; }
int push_count() const { return push_count_; }
- int ast_id() const { return ast_id_; }
- void set_ast_id(int id) { ast_id_ = id; }
+ BailoutId ast_id() const { return ast_id_; }
+ void set_ast_id(BailoutId id) { ast_id_ = id; }
+
+ HEnterInlined* entry() const { return entry_; }
+ void set_entry(HEnterInlined* entry) { entry_ = entry; }
int length() const { return values_.length(); }
bool is_special_index(int i) const {
@@ -462,7 +497,7 @@ class HEnvironment: public ZoneObject {
void Push(HValue* value) {
ASSERT(value != NULL);
++push_count_;
- values_.Add(value);
+ values_.Add(value, zone());
}
HValue* Pop() {
@@ -501,7 +536,14 @@ class HEnvironment: public ZoneObject {
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind,
- bool is_construct) const;
+ InliningKind inlining_kind) const;
+
+ HEnvironment* DiscardInlined(bool drop_extra) {
+ HEnvironment* outer = outer_;
+ while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+ if (drop_extra) outer->Drop(1);
+ return outer;
+ }
void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
@@ -519,13 +561,16 @@ class HEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
void PrintToStd();
+ Zone* zone() const { return zone_; }
+
private:
- explicit HEnvironment(const HEnvironment* other);
+ HEnvironment(const HEnvironment* other, Zone* zone);
HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
- int arguments);
+ int arguments,
+ Zone* zone);
// Create an artificial stub environment (e.g. for argument adaptor or
// constructor stub).
@@ -560,9 +605,11 @@ class HEnvironment: public ZoneObject {
int specials_count_;
int local_count_;
HEnvironment* outer_;
+ HEnterInlined* entry_;
int pop_count_;
int push_count_;
- int ast_id_;
+ BailoutId ast_id_;
+ Zone* zone_;
};
@@ -590,13 +637,13 @@ class AstContext {
// Add a hydrogen instruction to the instruction stream (recording an
// environment simulation if necessary) and then fill this context with
// the instruction as value.
- virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id) = 0;
// Finishes the current basic block and materialize a boolean for
// value context, nothing for effect, generate a branch for test context.
// Call this function in tail position in the Visit functions for
// expressions.
- virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
@@ -607,7 +654,7 @@ class AstContext {
HGraphBuilder* owner() const { return owner_; }
- inline Zone* zone();
+ inline Zone* zone() const;
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
@@ -631,8 +678,8 @@ class EffectContext: public AstContext {
virtual ~EffectContext();
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
};
@@ -644,8 +691,8 @@ class ValueContext: public AstContext {
virtual ~ValueContext();
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -658,17 +705,19 @@ class TestContext: public AstContext {
public:
TestContext(HGraphBuilder* owner,
Expression* condition,
+ TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
condition_(condition),
+ oracle_(oracle),
if_true_(if_true),
if_false_(if_false) {
}
virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, int ast_id);
- virtual void ReturnControl(HControlInstruction* instr, int ast_id);
+ virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -676,6 +725,7 @@ class TestContext: public AstContext {
}
Expression* condition() const { return condition_; }
+ TypeFeedbackOracle* oracle() const { return oracle_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -685,31 +735,24 @@ class TestContext: public AstContext {
void BuildBranch(HValue* value);
Expression* condition_;
+ TypeFeedbackOracle* oracle_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
-enum ReturnHandlingFlag {
- NORMAL_RETURN,
- DROP_EXTRA_ON_RETURN,
- CONSTRUCT_CALL_RETURN
-};
-
-
class FunctionState {
public:
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
- ReturnHandlingFlag return_handling);
+ InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
- bool drop_extra() { return return_handling_ == DROP_EXTRA_ON_RETURN; }
- bool is_construct() { return return_handling_ == CONSTRUCT_CALL_RETURN; }
+ InliningKind inlining_kind() const { return inlining_kind_; }
HBasicBlock* function_return() { return function_return_; }
TestContext* test_context() { return test_context_; }
void ClearInlinedTestContext() {
@@ -739,11 +782,8 @@ class FunctionState {
// inlined. NULL when not inlining.
AstContext* call_context_;
- // Indicate whether we have to perform special handling on return from
- // inlined functions.
- // - DROP_EXTRA_ON_RETURN: Drop an extra value from the environment.
- // - CONSTRUCT_CALL_RETURN: Either use allocated receiver or return value.
- ReturnHandlingFlag return_handling_;
+ // The kind of call which is currently being inlined.
+ InliningKind inlining_kind_;
// When inlining in an effect or value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
@@ -840,7 +880,7 @@ class HGraphBuilder: public AstVisitor {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int ast_id);
+ void AddSimulate(BailoutId ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -850,7 +890,7 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
- int join_id);
+ BailoutId join_id);
TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
@@ -858,6 +898,12 @@ class HGraphBuilder: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void* operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+ }
+ void operator delete(void* pointer, Zone* zone) { }
+ void operator delete(void* pointer) { }
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -963,9 +1009,8 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression and emit a push to the outgoing
- // arguments. Returns the hydrogen value that was pushed.
- HValue* VisitArgument(Expression* expr);
+ // Visit an argument subexpression and emit a push to the outgoing arguments.
+ void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
@@ -1014,14 +1059,18 @@ class HGraphBuilder: public AstVisitor {
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
- ZoneList<Expression*>* arguments,
- HValue* receiver,
- int ast_id,
- int return_id,
- ReturnHandlingFlag return_handling);
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind);
bool TryInlineCall(Call* expr, bool drop_extra = false);
- bool TryInlineConstruct(CallNew* expr, HValue* receiver);
+ bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
+ bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
+ bool TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -1038,7 +1087,7 @@ class HGraphBuilder: public AstVisitor {
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position,
- int ast_id);
+ BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
@@ -1070,36 +1119,37 @@ class HGraphBuilder: public AstVisitor {
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Property* expr,
- Handle<Map> type,
- LookupResult* result,
- bool smi_and_map_check);
- HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- ElementsKind elements_kind,
- bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
+ HValue* dependency,
ElementsKind elements_kind,
bool is_store);
+ HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
+ HValue* key,
+ HValue* val,
+ SmallMapList* maps);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ Handle<Map> map,
+ bool is_store);
+
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
+ HValue* dependency,
Handle<Map> map,
bool is_store);
+
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Expression* prop,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects);
@@ -1108,42 +1158,75 @@ class HGraphBuilder: public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- int ast_id,
+ BailoutId ast_id,
int position,
bool is_store,
bool* has_side_effects);
- HInstruction* BuildLoadNamed(HValue* object,
- Property* prop,
- Handle<Map> map,
- Handle<String> name);
- HInstruction* BuildStoreNamed(HValue* object,
- HValue* value,
- Expression* expr);
- HInstruction* BuildStoreNamed(HValue* object,
- HValue* value,
- ObjectLiteral::Property* prop);
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ Handle<Map> map,
+ LookupResult* result);
+ HInstruction* BuildLoadNamedGeneric(HValue* object,
+ Handle<String> name,
+ Property* expr);
+ HInstruction* BuildCallGetter(HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder);
+ HInstruction* BuildLoadNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ void AddCheckMapsWithTransitions(HValue* object,
+ Handle<Map> map);
+
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
- Handle<Map> type,
- LookupResult* lookup,
- bool smi_and_map_check);
+ Handle<Map> map,
+ LookupResult* lookup);
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
+ HInstruction* BuildCallSetter(HValue* object,
+ HValue* value,
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder);
+ HInstruction* BuildStoreNamedMonomorphic(HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map);
HInstruction* BuildStoreKeyedGeneric(HValue* object,
HValue* key,
HValue* value);
HValue* BuildContextChainWalk(Variable* var);
- void AddCheckConstantFunction(Call* expr,
+ HInstruction* BuildThisFunction();
+
+ void AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map);
+
+ void AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
- Handle<Map> receiver_map,
- bool smi_and_map_check);
+ Handle<Map> receiver_map);
+
+ bool MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount);
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -1175,12 +1258,12 @@ class HGraphBuilder: public AstVisitor {
};
-Zone* AstContext::zone() { return owner_->zone(); }
+Zone* AstContext::zone() const { return owner_->zone(); }
class HValueMap: public ZoneObject {
public:
- HValueMap()
+ explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
@@ -1188,15 +1271,15 @@ class HValueMap: public ZoneObject {
array_(NULL),
lists_(NULL),
free_list_head_(kNil) {
- ResizeLists(kInitialSize);
- Resize(kInitialSize);
+ ResizeLists(kInitialSize, zone);
+ Resize(kInitialSize, zone);
}
void Kill(GVNFlagSet flags);
- void Add(HValue* value) {
+ void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags());
- Insert(value);
+ Insert(value, zone);
}
HValue* Lookup(HValue* value) const;
@@ -1220,9 +1303,9 @@ class HValueMap: public ZoneObject {
HValueMap(Zone* zone, const HValueMap* other);
- void Resize(int new_size);
- void ResizeLists(int new_size);
- void Insert(HValue* value);
+ void Resize(int new_size, Zone* zone);
+ void ResizeLists(int new_size, Zone* zone);
+ void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
@@ -1241,6 +1324,7 @@ class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
+ HSideEffectMap& operator= (const HSideEffectMap& other);
void Kill(GVNFlagSet flags);
@@ -1374,7 +1458,7 @@ class HTracer: public Malloced {
WriteChars(filename, "", 0, false);
}
- void TraceLiveRange(LiveRange* range, const char* type);
+ void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
void Trace(const char* name, HGraph* graph, LChunk* chunk);
void FlushToFile();
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
index 3cf0d00..114f878 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
@@ -46,12 +46,21 @@ namespace v8 {
namespace internal {
+static const byte kCallOpcode = 0xE8;
+
+
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
CPU::FlushICache(p, sizeof(uint32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
+ }
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
@@ -150,10 +159,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
@@ -172,6 +178,21 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -209,7 +230,7 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == 0xE8;
+ return *pc_ == kCallOpcode;
}
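
Both the code-age sequence and the patched return sequence are recognized by the same byte: 0xE8 is the opcode of the ia32 near "call rel32", whose 32-bit displacement is relative to the end of the five-byte instruction. That is also why target_address_from_return_address() below simply subtracts kCallTargetAddressOffset from the return address. A scalar sketch of the decoding:

    #include <stdint.h>
    #include <string.h>

    // Sketch: decode the target of an ia32 near call (opcode 0xE8) at pc.
    static uint8_t* CallTargetAt(uint8_t* pc) {
      int32_t disp;
      memcpy(&disp, pc + 1, sizeof(disp));  // 4-byte little-endian rel32.
      return pc + 5 + disp;  // Displacement is relative to the next instruction.
    }
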
@@ -230,7 +251,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
-#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ #ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
@@ -258,6 +281,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -338,9 +363,9 @@ void Assembler::emit(Handle<Object> handle) {
}
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, unsigned id) {
- if (rmode == RelocInfo::CODE_TARGET && id != kNoASTId) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, static_cast<intptr_t>(id));
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
+ if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
} else if (rmode != RelocInfo::NONE) {
RecordRelocInfo(rmode);
}
@@ -390,6 +415,11 @@ void Assembler::set_target_address_at(Address pc, Address target) {
}
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
index a42f632..06fc411 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
@@ -169,7 +169,7 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT;
+ 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -314,8 +314,7 @@ static void InitCoverageLog();
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ positions_recorder_(this) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -1064,6 +1063,25 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
+void Assembler::ror(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ ASSERT(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xC8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xC8 | dst.code());
+ EMIT(imm8);
+ }
+}
+
+void Assembler::ror_cl(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ EMIT(0xC8 | dst.code());
+}
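
The new ror/ror_cl emitters exist to back the MatchRotateRight hook added to the graph builder above: shift pairs of the shape (x >>> n) | (x << (32 - n)) can now compile down to a single rotate. A sketch of the idiom being recognized (an assumption about the matched pattern, not the matcher code itself):

    // Canonical rotate-right idiom; the & 31 mirrors ia32's masking of the
    // rotate count to five bits, which also keeps the n == 0 case well defined.
    static inline uint32_t RotateRight(uint32_t x, uint32_t n) {
      n &= 31;
      return (x >> n) | (x << ((32 - n) & 31));
    }
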
+
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
@@ -1373,7 +1391,7 @@ void Assembler::bind_to(Label* L, int pos) {
ASSERT(offset_to_next <= 0);
// Relative address, relative to point after address.
int disp = pos - fixup_pos - sizeof(int8_t);
- ASSERT(0 <= disp && disp <= 127);
+ CHECK(0 <= disp && disp <= 127);
set_byte_at(fixup_pos, disp);
if (offset_to_next < 0) {
L->link_to(fixup_pos + offset_to_next, Label::kNear);
@@ -1440,7 +1458,7 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
ASSERT(RelocInfo::IsCodeTarget(rmode));
@@ -1501,7 +1519,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- ASSERT(0 <= cc && cc < 16);
+ ASSERT(0 <= cc && static_cast<int>(cc) < 16);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -1533,7 +1551,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- ASSERT((0 <= cc) && (cc < 16));
+ ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
@@ -1938,6 +1956,16 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
+void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x2D);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2044,6 +2072,15 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2086,6 +2123,16 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
+void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x76);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
index 4ead80b..9fb7baa 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.h
@@ -584,9 +584,6 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -596,6 +593,10 @@ class Assembler : public AssemblerBase {
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -624,6 +625,7 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
static const int kCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
static const int kJSReturnSequenceLength = 6;
// The debug break slot must be able to contain a call instruction.
@@ -807,6 +809,8 @@ class Assembler : public AssemblerBase {
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
+ void ror(Register dst, uint8_t imm8);
+ void ror_cl(Register dst);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);
@@ -883,8 +887,8 @@ class Assembler : public AssemblerBase {
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None());
// Jumps
// unconditional jump to L
@@ -978,6 +982,7 @@ class Assembler : public AssemblerBase {
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvtsd2si(Register dst, XMMRegister src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
@@ -993,6 +998,7 @@ class Assembler : public AssemblerBase {
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
+ void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
@@ -1009,6 +1015,7 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
+ void pcmpeqd(XMMRegister dst, XMMRegister src);
void movaps(XMMRegister dst, XMMRegister src);
@@ -1110,8 +1117,6 @@ class Assembler : public AssemblerBase {
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -1136,7 +1141,7 @@ class Assembler : public AssemblerBase {
inline void emit(Handle<Object> handle);
inline void emit(uint32_t x,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId id = TypeFeedbackId::None());
inline void emit(const Immediate& x);
inline void emit_w(const Immediate& x);
@@ -1184,9 +1189,6 @@ class Assembler : public AssemblerBase {
RelocInfoWriter reloc_info_writer;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
-
friend class PositionsRecorder;
};
diff --git a/src/3rdparty/v8/src/ia32/builtins-ia32.cc b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
index a36763d..01785bb 100644
--- a/src/3rdparty/v8/src/ia32/builtins-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
@@ -74,6 +74,43 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -501,6 +538,42 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ sub(Operand(esp, 0), Immediate(5));
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ }
+ __ popad();
+ __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
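
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) is a conventional X-macro expansion: the list macro invokes its argument once per code age, stamping out an even-marking and an odd-marking builtin for each. A hypothetical shape of such a list (the real CODE_AGE_LIST in objects.h defines its own set of ages):

    #define CODE_AGE_LIST(V) \
      V(Quadragenarian)      \
      V(Quinquagenarian)     \
      V(Sexagenarian)
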
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -641,9 +714,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, FieldOperand(ebx, kGlobalIndex));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -819,9 +892,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, FieldOperand(ebx, kGlobalOffset));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -900,7 +973,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1003,7 +1076,8 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
- __ LoadInitialArrayMap(array_function, scratch, elements_array);
+ __ LoadInitialArrayMap(array_function, scratch,
+ elements_array, fill_with_hole);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@@ -1274,11 +1348,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ONLY_ELEMENTS,
+ FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
index fe9db7b..80954b8 100644
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
@@ -66,9 +66,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
@@ -76,12 +80,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
@@ -94,11 +98,20 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ test(ebx, ebx);
+ __ j(not_zero, &check_optimized, Label::kNear);
+ }
+ __ bind(&install_unoptimized);
+ __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+ Immediate(factory->undefined_value()));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
@@ -106,6 +119,68 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+ // ecx holds the native context; ebx points to a fixed array of 3-element
+ // entries (native context, optimized code, literals).
+ // The map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into edx.
+ __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
+ __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+ __ j(equal, &install_optimized);
+
+ // Iterate through the rest of the map backwards. edx holds an index as a Smi.
+ Label loop;
+ Label restore;
+ __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ // Do not double-check the first entry.
+ __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ j(equal, &restore);
+ __ sub(edx, Immediate(Smi::FromInt(
+ SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
+ __ j(not_equal, &loop, Label::kNear);
+ // Hit: fetch the optimized code.
+ __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+ // TODO(fschneider): Idea: store proper code pointers in the optimized code
+ // map and either unmangle them on marking or do nothing as the whole map is
+ // discarded on major GC anyway.
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+ // Now link a function into a list of optimized functions.
+ __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
+ // No need for write barrier as JSFunction (eax) is in the new space.
+
+ __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
+ // Store JSFunction (eax) into edx before issuing write barrier as
+ // it clobbers all the registers passed.
+ __ mov(edx, eax);
+ __ RecordWriteContextSlot(
+ ecx,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ edx,
+ ebx,
+ kDontSaveFPRegs);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ __ bind(&restore);
+ // Restore SharedFunctionInfo into edx.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&install_unoptimized);
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(ecx); // Temporarily remove return address.
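
The probe loop above searches the SharedFunctionInfo's optimized code map, a flat FixedArray of (native context, code, literals) triples, from the last entry toward the front. A scalar sketch of the same lookup, assuming kEntryLength == 3 as the comments state:

    // Sketch of the optimized-code-map probe done in assembly above.
    // entries: [ctx0, code0, lit0, ctx1, code1, lit1, ...]
    static Code* LookupOptimized(FixedArray* map, Context* native_context) {
      const int kEntryLength = 3;  // context, code, literals.
      for (int i = map->length() - kEntryLength; i >= 0; i -= kEntryLength) {
        if (map->get(i) == native_context) return Code::cast(map->get(i + 1));
      }
      return NULL;  // Caller falls back to the unoptimized shared code.
    }
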
@@ -142,12 +217,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
// Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
// Copy the qml global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), ebx);
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
@@ -191,9 +266,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
@@ -203,7 +278,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(equal, message);
}
__ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
__ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
@@ -213,12 +288,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
// Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+ __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+ __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
// Copy the qml global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_INDEX));
- __ mov(ContextOperand(eax, Context::QML_GLOBAL_INDEX), ebx);
+ __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX));
+ __ mov(ContextOperand(eax, Context::QML_GLOBAL_OBJECT_INDEX), ebx);
// Initialize the rest of the slots to the hole value.
if (slots_ == 1) {
@@ -1727,9 +1802,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
__ cvtsi2sd(xmm2, ecx);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, &not_int32);
- __ j(carry, &not_int32);
+ __ pcmpeqd(xmm2, xmm0);
+ __ movmskpd(ecx, xmm2);
+ __ test(ecx, Immediate(1));
+ __ j(zero, &not_int32);
}
GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
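
The replacement sequence answers "does xmm0 hold an exact int32?" by a truncate-and-reconvert round trip, then compares bit patterns with pcmpeqd/movmskpd instead of the floating-point ucomisd, sidestepping the NaN/unordered flag handling of the old two-jump version. A scalar paraphrase of the test:

    #include <stdint.h>
    #include <string.h>

    // Sketch: exact-int32 test via round trip and bitwise comparison.
    static bool IsExactInt32(double value) {
      int32_t truncated = static_cast<int32_t>(value);     // cvttsd2si
      double round_trip = static_cast<double>(truncated);  // cvtsi2sd
      return memcmp(&round_trip, &value, sizeof(value)) == 0;
    }
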
@@ -3147,21 +3223,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg, while_true, no_multiply;
+ Label no_neg, while_true, while_false;
__ test(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ neg(scratch);
__ bind(&no_neg);
- __ bind(&while_true);
+ __ j(zero, &while_false, Label::kNear);
__ shr(scratch, 1);
- __ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ bind(&no_multiply);
+ // The above condition means CF==0 && ZF==0, i.e. the bit that has been
+ // shifted out is 0 and the result is not 0.
+ __ j(above, &while_true, Label::kNear);
+ __ movsd(double_result, double_scratch);
+ __ j(zero, &while_false, Label::kNear);
+ __ bind(&while_true);
+ __ shr(scratch, 1);
__ mulsd(double_scratch, double_scratch);
+ __ j(above, &while_true, Label::kNear);
+ __ mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
+ __ bind(&while_false);
// scratch has the original value of the exponent - if the exponent is
// negative, return 1/result.
__ test(exponent, exponent);
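
The restructured loop is classic square-and-multiply on the absolute value of the integer exponent, rearranged so the squaring precedes the multiply and a zero exponent exits immediately; the code after while_false then returns 1/result for negative exponents. A plain-C paraphrase of what the assembly computes:

    // Square-and-multiply: computes base raised to exp, for exp >= 0.
    static double PowByInt(double base, int exp) {
      double result = 1.0;
      double scratch = base;
      while (exp != 0) {
        if (exp & 1) result *= scratch;  // Low bit set: fold this power in.
        scratch *= scratch;              // Square for the next bit.
        exp >>= 1;
      }
      return result;
    }
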
@@ -3368,10 +3451,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[0] = mapped parameter count (tagged)
// esp[8] = parameter count (tagged)
// esp[12] = address of receiver argument
- // Get the arguments boilerplate from the current (global) context into edi.
+ // Get the arguments boilerplate from the current native context into edi.
Label has_mapped_parameters, copy;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -3519,7 +3602,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ pop(eax); // Remove saved parameter count.
__ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -3561,9 +3644,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
@@ -3682,7 +3765,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(edx, Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
__ j(above, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3831,20 +3914,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
+ static const int kRegExpExecuteArguments = 9;
__ EnterApiExitFrame(kRegExpExecuteArguments);
- // Argument 8: Pass current isolate address.
- __ mov(Operand(esp, 7 * kPointerSize),
+ // Argument 9: Pass current isolate address.
+ __ mov(Operand(esp, 8 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
- // Argument 6: Start (high end) of backtracking stack memory area.
+ // Argument 7: Start (high end) of backtracking stack memory area.
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 5 * kPointerSize), esi);
+ __ mov(Operand(esp, 6 * kPointerSize), esi);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
@@ -3907,7 +3994,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ cmp(eax, 1);
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
__ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
@@ -4070,11 +4159,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
Factory* factory = masm->isolate()->factory();
__ mov(ecx, Immediate(factory->empty_fixed_array()));
__ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4098,15 +4187,15 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Immediate(factory->fixed_array_map()));
// Set length.
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with the-hole.
+ // Fill contents of fixed-array with undefined.
__ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->the_hole_value()));
+ __ mov(edx, Immediate(factory->undefined_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// eax: JSArray.
// ecx: Number of elements to fill.
// ebx: Start of elements in FixedArray.
- // edx: the hole.
+ // edx: undefined.
Label loop;
__ test(ecx, ecx);
__ bind(&loop);
@@ -4292,13 +4381,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ jmp(&not_user_equal);
__ bind(&user_equal);
-
+
__ pop(ebx); // Return address.
__ push(eax);
__ push(edx);
__ push(ebx);
__ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
+
__ bind(&not_user_equal);
}
@@ -5676,7 +5765,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -5685,7 +5774,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+ __ AssertSmi(ebx);
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
@@ -5704,9 +5793,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ xor_(edi, ecx);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
@@ -6277,7 +6366,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -6324,7 +6413,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ push(edx);
__ push(edi);
__ SmiUntag(ecx);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
@@ -6946,8 +7035,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ mov(r1, FieldOperand(elements, kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
@@ -7111,8 +7199,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+ // and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@@ -7121,6 +7209,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
+ // FastNewClosureStub
+ { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7169,6 +7259,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(SSE2);
+}
+
+
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -7289,6 +7384,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Mode mode) {
Label object_is_black, need_incremental, need_incremental_pop_object;
+ __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ mov(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), Immediate(1));
+ __ mov(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
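The counter manipulation added above gives each page a small budget of recorded writes before the incremental marker is consulted. A rough C++ equivalent of the emitted instructions (the field name is an assumption; only the offset constant appears in the hunk):

    // Sketch: mask the object address down to its page, then decrement the
    // per-chunk counter stored at MemoryChunk::kWriteBarrierCounterOffset.
    MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(
        reinterpret_cast<uintptr_t>(object) & ~Page::kPageAlignmentMask);
    if (--chunk->write_barrier_counter_ < 0) {
      goto need_incremental;  // budget exhausted: take the slow path
    }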
@@ -7384,9 +7490,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(edi, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS kinds.
__ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+ __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7408,7 +7514,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ jmp(&slow_elements);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@@ -7421,15 +7527,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
__ bind(&smi_element);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
FixedArrayBase::kHeaderSize), eax);
__ ret(0);
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ push(edx);
@@ -7445,6 +7551,38 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
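The FAST_*_ELEMENTS wildcards in the rewritten comments above stand for the split of every fast elements kind into a packed and a holey variant. Schematically (the names match V8 of this vintage; listed only to decode the comments):

    enum ElementsKind {
      FAST_SMI_ELEMENTS,          // packed, smis only
      FAST_HOLEY_SMI_ELEMENTS,    // smis, may contain holes
      FAST_ELEMENTS,              // packed, arbitrary objects
      FAST_HOLEY_ELEMENTS,
      FAST_DOUBLE_ELEMENTS,       // packed, unboxed doubles
      FAST_HOLEY_DOUBLE_ELEMENTS
      // ... non-fast kinds omitted
    };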
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Ecx is the only volatile register we must save.
+ __ push(ecx);
+
+ // Calculate and push the original stack pointer.
+ __ lea(eax, Operand(esp, kPointerSize));
+ __ push(eax);
+
+ // Calculate and push the function address.
+ __ mov(eax, Operand(eax, 0));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ __ push(eax);
+
+ // Call the entry hook.
+ int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
+ __ call(Operand(hook_location, RelocInfo::NONE));
+ __ add(esp, Immediate(2 * kPointerSize));
+
+ // Restore ecx.
+ __ pop(ecx);
+ __ ret(0);
+}
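The hook called by the stub receives the callee address and the location of the return address, pushed in cdecl order above. Its assumed prototype, reconstructed from the two pushes (check v8.h of this release before relying on it):

    // Assumption: matches the (function, stack location) pair pushed above.
    typedef void (*FunctionEntryHook)(uintptr_t function,
                                      uintptr_t return_addr_location);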
+
#undef __
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.cc b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
index cff6454..4c79519 100644
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
@@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
@@ -732,7 +732,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -757,6 +757,103 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
+static const int kNoCodeAgeSequenceLength = 5;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(ebp);
+ patcher.masm()->mov(ebp, esp);
+ patcher.masm()->push(esi);
+ patcher.masm()->push(edi);
+ initialized = true;
+ }
+ return sequence;
+}
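On ia32 the four patcher instructions encode to exactly kNoCodeAgeSequenceLength bytes, which is what keeps the memcmp checks below cheap. Hand-assembled for illustration (standard x86 encodings; kCallOpcode is assumed to be 0xE8, the rel32 call opcode that starts an aged sequence):

    // push ebp      -> 0x55
    // mov ebp, esp  -> 0x8B 0xEC
    // push esi      -> 0x56
    // push edi      -> 0x57
    static const byte kYoungSequence[kNoCodeAgeSequenceLength] =
        {0x55, 0x8B, 0xEC, 0x56, 0x57};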
+
+
+byte* Code::FindPlatformCodeAgeSequence() {
+ byte* start = instruction_start();
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (!memcmp(start, young_sequence, young_length) ||
+ *start == kCallOpcode) {
+ return start;
+ } else {
+ if (kind() == FUNCTION) {
+ byte* start_after_strict =
+ start + kSizeOfFullCodegenStrictModePrologue;
+ ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
+ start[kSizeOfFullCodegenStrictModePrologue] == kCallOpcode);
+ return start_after_strict;
+ } else {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ start = instruction_start() + kSizeOfOptimizedStrictModePrologue;
+ if (!memcmp(start, young_sequence, young_length) ||
+ *start == kCallOpcode) {
+ return start;
+ }
+ start = instruction_start() + kSizeOfOptimizedAlignStackPrologue;
+ if (!memcmp(start, young_sequence, young_length) ||
+ *start == kCallOpcode) {
+ return start;
+ }
+ start = instruction_start() + kSizeOfOptimizedAlignStackPrologue +
+ kSizeOfOptimizedStrictModePrologue;
+ ASSERT(!memcmp(start, young_sequence, young_length) ||
+ *start == kCallOpcode);
+ return start;
+ }
+ }
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte.
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.h b/src/3rdparty/v8/src/ia32/codegen-ia32.h
index f4ab0b5..a783e9a 100644
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.h
+++ b/src/3rdparty/v8/src/ia32/codegen-ia32.h
@@ -37,6 +37,10 @@ namespace internal {
// Forward declarations
class CompilationInfo;
+static const int kSizeOfFullCodegenStrictModePrologue = 34;
+static const int kSizeOfOptimizedStrictModePrologue = 12;
+static const int kSizeOfOptimizedAlignStackPrologue = 44;
+
// -------------------------------------------------------------------------
// CodeGenerator
diff --git a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
index 73961e1..99ad522 100644
--- a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
@@ -117,6 +117,10 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
@@ -194,8 +198,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -284,11 +287,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -310,7 +313,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -330,9 +333,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -351,10 +354,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
+ PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
ast_id,
input_frame_size,
- output_frame_size);
+ output_frame_size,
+ input_->GetRegister(ebp.code()),
+ input_->GetRegister(esp.code()));
}
// There's only one output frame in the OSR case.
@@ -404,7 +409,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
name = "function";
break;
}
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
@@ -415,6 +420,24 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_offset -= kPointerSize;
}
+ // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+ int frame_pointer = input_->GetRegister(ebp.code());
+ if ((frame_pointer & kPointerSize) != 0) {
+ frame_pointer -= kPointerSize;
+ has_alignment_padding_ = 1;
+ }
+
+ int32_t alignment_state = (has_alignment_padding_ == 1) ?
+ kAlignmentPaddingPushed :
+ kNoAlignmentPadding;
+ if (FLAG_trace_osr) {
+ PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
+ output_offset,
+ alignment_state);
+ }
+ output_[0]->SetFrameSlot(output_offset, alignment_state);
+ output_offset -= kPointerSize;
+
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
@@ -427,7 +450,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+ output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@@ -436,15 +459,15 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(pc);
}
Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -659,16 +682,143 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be taken from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
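Putting the slot writes above together, the synthesized accessor frame looks like this, highest address first (the last slot exists only for setter frames):

    // [ caller's pc     ]  <- top_address + output_frame_size
    // [ caller's fp     ]  <- fp of the new frame
    // [ context         ]  copied from the previous frame
    // [ INTERNAL marker ]  Smi::FromInt(StackFrame::INTERNAL), not a function
    // [ code object     ]  LoadIC_Getter_ForDeopt / StoreIC_Setter_ForDeopt
    // [ implicit return ]  setter frames only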
+
+
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
@@ -688,24 +838,38 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+
+ unsigned alignment_state_offset =
+ input_offset - parameter_count * kPointerSize -
+ StandardFrameConstants::kFixedFrameSize -
+ kPointerSize;
+ ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ JavaScriptFrameConstants::kLocal0Offset);
+
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
+ int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
+ has_alignment_padding_ =
+ (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
// 2 = context and function in the frame.
- top_address =
- input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+ // If the optimized frame had alignment padding, adjust the frame pointer
+ // to point to the new position of the old frame pointer after padding
+ // is removed. Subtract 2 * kPointerSize for the context and function slots.
+ top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
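To make the bottommost-frame formula above concrete with illustrative numbers: for ebp = 0x1000, height_in_bytes = 24 and one word of alignment padding, top_address = 0x1000 - 2*4 - 24 + 4 = 0xFE4, i.e. the padding word shifts the rebuilt frame up by one kPointerSize relative to an unpadded one.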
@@ -747,13 +911,17 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+ ASSERT(!is_bottommost ||
+ (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
+ fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
+ ASSERT(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
// For the bottommost output frame the context can be read from the input
// frame. For all subsequent output frames it can be read from the function
@@ -948,6 +1116,28 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
+ if (type() != OSR) {
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, "alignment marker expected");
+ }
+ __ bind(&no_padding);
+ } else {
+ // If frame needs dynamic alignment push padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ push(Immediate(kAlignmentZapValue));
+ __ bind(&no_padding);
+ }
+
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/3rdparty/v8/src/ia32/disasm-ia32.cc b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
index b5ddcca..dd07deb 100644
--- a/src/3rdparty/v8/src/ia32/disasm-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
@@ -31,6 +31,8 @@
#include "v8.h"
+#undef CONST
+
#if defined(V8_TARGET_ARCH_IA32)
#include "disasm.h"
@@ -553,6 +555,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
case 2: mnem = "not"; break;
case 3: mnem = "neg"; break;
case 4: mnem = "mul"; break;
+ case 5: mnem = "imul"; break;
case 7: mnem = "idiv"; break;
default: UnimplementedInstruction();
}
@@ -1266,6 +1269,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x56) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("orpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
@@ -1296,6 +1307,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x76) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pcmpeqd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x90) {
data++;
AppendToBuffer("nop"); // 2 byte nop.
@@ -1463,6 +1482,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
case 0x2C: mnem = "cvttsd2si"; break;
+ case 0x2D: mnem = "cvtsd2si"; break;
case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
@@ -1475,7 +1495,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (b2 == 0x2A) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- } else if (b2 == 0x2C) {
+ } else if (b2 == 0x2C || b2 == 0x2D) {
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.h b/src/3rdparty/v8/src/ia32/frames-ia32.h
index 9e51857..18915e2 100644
--- a/src/3rdparty/v8/src/ia32/frames-ia32.h
+++ b/src/3rdparty/v8/src/ia32/frames-ia32.h
@@ -53,6 +53,10 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
+const int kNoAlignmentPadding = 0;
+const int kAlignmentPaddingPushed = 2;
+const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
+
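These constants back the dynamic 8-byte frame alignment the deoptimizer hunks above rely on: a padded frame carries one extra kAlignmentZapValue word, and whether padding is in play can be read off the parity of the frame pointer. A minimal sketch of the test used in the OSR path:

    // Sketch, mirroring DoComputeOsrOutputFrame: on ia32 (kPointerSize == 4)
    // an 8-byte-aligned frame differs from an unaligned one in bit 2 of ebp.
    inline bool HasAlignmentPadding(uint32_t ebp_value) {
      return (ebp_value & kPointerSize) != 0;
    }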
// ----------------------------------------------------
@@ -119,6 +123,8 @@ class JavaScriptFrameConstants : public AllStatic {
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
+
+ static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
index 66cf497..c58f242 100644
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
@@ -123,6 +123,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -136,6 +138,8 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
+ Label start;
+ __ bind(&start);
__ test(ecx, ecx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
@@ -147,6 +151,8 @@ void FullCodeGenerator::Generate() {
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
+ ASSERT(!FLAG_age_code ||
+ (kSizeOfFullCodegenStrictModePrologue == ok.pos() - start.pos()));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -177,11 +183,14 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
(scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate local context");
+ Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -228,7 +237,7 @@ void FullCodeGenerator::Generate() {
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ SafePush(Immediate(Smi::FromInt(num_parameters)));
+ __ push(Immediate(Smi::FromInt(num_parameters)));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -258,7 +267,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -273,7 +282,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -318,20 +327,12 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = 10;
- }
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 100;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -343,7 +344,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -405,7 +406,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -761,7 +762,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
@@ -783,11 +784,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ : isolate()->factory()->undefined_value(), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
case Variable::PARAMETER:
@@ -815,10 +816,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ push(esi);
__ push(Immediate(variable->name()));
// VariableDeclaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -842,13 +842,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(function, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
}
@@ -899,9 +899,9 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
Visit(declaration->module());
break;
}
@@ -1098,19 +1098,28 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a map in register eax. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
+
+ __ EnumLength(edx, eax);
+ __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ j(equal, &no_descriptors);
+
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(eax); // Map.
- __ push(edx); // Enumeration cache.
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ push(eax); // Enumeration cache length (as smi).
+ __ push(ecx); // Enumeration cache.
+ __ push(edx); // Number of valid entries for the map in the enum cache.
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ add(esp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
// We got a fixed array in register eax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1119,7 +1128,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(ebx, cell);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@@ -1275,9 +1284,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ mov(temp, context);
}
__ bind(&next);
- // Terminate at global context.
+ // Terminate at native context.
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->global_context_map()));
+ Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
@@ -1561,9 +1570,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1589,7 +1598,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1653,7 +1662,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ bool has_constant_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1664,7 +1674,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@@ -1676,10 +1686,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(constant_elements_kind == FAST_ELEMENTS ||
- constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1707,9 +1716,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (constant_elements_kind == FAST_ELEMENTS) {
- // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
- // transition and don't need to call the runtime stub.
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
+ // cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
@@ -1801,11 +1810,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1861,14 +1870,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1889,7 +1898,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1974,7 +1984,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2101,7 +2112,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2136,37 +2147,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ push(Operand(esp, kPointerSize)); // Receiver is now under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->handle());
- if (expr->ends_initialization_block()) {
- __ mov(edx, Operand(esp, 0));
- } else {
- __ pop(edx);
- }
+ __ pop(edx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(eax); // Result of assignment, saved even if not needed.
- __ push(Operand(esp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- __ Drop(1);
- }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2178,38 +2167,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// esp[0] : key
// esp[kPointerSize] : receiver
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ push(Operand(esp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
__ pop(ecx); // Key.
- if (expr->ends_initialization_block()) {
- __ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
- } else {
- __ pop(edx);
- }
+ __ pop(edx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ pop(edx);
- __ push(eax); // Result of assignment, saved even if not needed.
- __ push(edx);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(eax);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2224,6 +2189,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForAccumulatorValue(expr->obj());
__ mov(edx, result_register());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
@@ -2238,7 +2204,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
__ call(code, rmode, ast_id);
}
@@ -2262,7 +2228,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2294,7 +2260,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2314,20 +2280,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(ebx, cell);
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(ebx, cell);
CallFunctionStub stub(arg_count, flags);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->id());
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
@@ -2502,24 +2466,18 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ SafeSet(eax, Immediate(arg_count));
+ __ Set(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ mov(ebx, cell);
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(ebx, cell);
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
@@ -2660,7 +2618,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(eax);
+ __ AssertNotSmi(eax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2676,45 +2634,51 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ j(equal, if_false);
// Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(ecx, ebx);
+ __ cmp(ecx, 0);
+ __ j(equal, &done);
+
__ LoadInstanceDescriptors(ebx, ebx);
- __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- // ebx: descriptor array
- // ecx: length of descriptor array
+ // ebx: descriptor array.
+ // ecx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kPointerSize == 4);
- __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+ __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
+ __ lea(ecx, Operand(ebx, ecx, times_2, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ add(ebx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
- Label entry, loop;
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ add(ebx, Immediate(kPointerSize));
+ __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(ebx, ecx);
__ j(not_equal, &loop);
+ __ bind(&done);
+
// Reload map as register ebx was used as temporary above.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // If a valueOf property is not found on the object check that it's
+ // If a valueOf property is not found on the object, check that its
// prototype is the un-modified String prototype. If not result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
__ JumpIfSmi(ecx, if_false);
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(edx,
- FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ FieldOperand(edx, GlobalObject::kNativeContextOffset));
__ cmp(ecx,
ContextOperand(edx,
Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
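
The rewritten scan walks only the map's own descriptors, stepping a whole descriptor at a time. In plain C++ the loop looks roughly like the sketch below; the (key, details, value) triple layout and the constant are stand-ins, not the real DescriptorArray offsets:

    #include <string>
    #include <vector>

    // Entries per descriptor in this model: (key, details, value).
    constexpr int kDescriptorSize = 3;

    bool HasValueOfDescriptor(const std::vector<std::string>& entries,
                              int number_of_own_descriptors) {
      // Visit key slots only, advancing one whole descriptor at a time, and
      // stop after the map's own descriptors, which is what the
      // NumberOfOwnDescriptors/imul/lea sequence above computes.
      for (int i = 0; i < number_of_own_descriptors * kDescriptorSize;
           i += kDescriptorSize) {
        if (entries[i] == "valueOf") return true;  // unsafe for default valueOf
      }
      return false;
    }

    int main() {
      std::vector<std::string> entries = {"toString", "", "", "valueOf", "", ""};
      return HasValueOfDescriptor(entries, 2) ? 0 : 1;  // finds "valueOf"
    }

As the updated comment notes, a name-only scan over a shared descriptor array can report a false positive; that is harmless, it merely skips the fast default-valueOf path.
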
@@ -2860,7 +2824,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2872,7 +2836,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
Label exit;
// Get the number of formal parameters.
- __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2885,7 +2849,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ __ AssertSmi(eax);
context()->Plug(eax);
}
@@ -2989,8 +2953,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ bind(&heapnumber_allocated);
__ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -3077,19 +3041,18 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = eax;
Register result = eax;
Register scratch = ecx;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ __ j(not_equal, &not_date_object);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3105,8 +3068,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(result);
}
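
The date-field change above replaces a debug-only assert with real control flow: non-date receivers now reach Runtime::kThrowNotDateError in release builds instead of crashing. Schematically, with stand-in types rather than V8's:

    #include <stdexcept>

    struct Date { double value; double cached_fields[8]; };

    // Index 0 is the raw time value; low indices may be served from a cache
    // that is valid only while the object's stamp matches the global stamp.
    double DateField(const Date* date, int index, long stamp, long cache_stamp) {
      if (date == nullptr)                        // was: Assert in debug builds
        throw std::runtime_error("not a Date");   // now: thrown at runtime
      if (index == 0) return date->value;
      if (stamp == cache_stamp) return date->cached_fields[index];
      return 0.0;  // placeholder for the C call that recomputes the field
    }

    int main() {
      Date d = {42.0, {0, 1, 2, 3, 4, 5, 6, 7}};
      return DateField(&d, 0, 0, 0) == 42.0 ? 0 : 1;
    }
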
@@ -3377,10 +3344,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
- __ j(equal, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &runtime);
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
@@ -3390,7 +3358,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(eax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3420,7 +3388,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ mov(eax, isolate()->factory()->undefined_value());
@@ -3433,9 +3401,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = eax;
Register cache = ebx;
Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
__ mov(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
__ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ mov(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
@@ -3505,9 +3473,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(eax);
- }
+ __ AssertString(eax);
Label materialize_true, materialize_false;
Label* if_true = NULL;
@@ -3530,9 +3496,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(eax);
- }
+ __ AssertString(eax);
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ IndexFromHash(eax, eax);
@@ -3606,7 +3570,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3620,7 +3584,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
@@ -3834,7 +3798,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3992,7 +3956,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(eax);
}
@@ -4050,7 +4015,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4113,7 +4078,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4147,7 +4112,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4164,7 +4129,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4372,7 +4337,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4454,7 +4419,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4483,14 +4448,51 @@ void FullCodeGenerator::EnterFinallyBlock() {
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
__ push(edx);
+
// Store result register while executing finally block.
__ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_obj));
+ __ push(edx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(edx, Operand::StaticVariable(has_pending_message));
+ __ SmiTag(edx);
+ __ push(edx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(edx, Operand::StaticVariable(pending_message_script));
+ __ push(edx);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
+ // Restore pending message from stack.
+ __ pop(edx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(Operand::StaticVariable(pending_message_script), edx);
+
+ __ pop(edx);
+ __ SmiUntag(edx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(Operand::StaticVariable(has_pending_message), edx);
+
+ __ pop(edx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(Operand::StaticVariable(pending_message_obj), edx);
+
+ // Restore result register from stack.
__ pop(result_register());
+
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
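
EnterFinallyBlock and ExitFinallyBlock now spill the isolate's pending-message state around the finally body, strictly last-in first-out. The discipline in miniature (the real stubs also save the result register and the cooked return address, omitted here):

    #include <cassert>
    #include <stack>

    struct PendingMessage { int obj, has_message, script; };
    PendingMessage g_pending = {0, 0, 0};
    std::stack<int> g_stack;

    void EnterFinally() {                 // mirrors EnterFinallyBlock
      g_stack.push(g_pending.obj);        // pushed first...
      g_stack.push(g_pending.has_message);
      g_stack.push(g_pending.script);     // ...pushed last
    }

    void ExitFinally() {                  // mirrors ExitFinallyBlock
      g_pending.script = g_stack.top(); g_stack.pop();  // popped first
      g_pending.has_message = g_stack.top(); g_stack.pop();
      g_pending.obj = g_stack.top(); g_stack.pop();     // popped last
    }

    int main() {
      g_pending = {1, 2, 3};
      EnterFinally();
      g_pending = {9, 9, 9};  // the finally body may clobber the state
      ExitFinally();
      assert(g_pending.obj == 1 && g_pending.has_message == 2 &&
             g_pending.script == 3);
    }
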
@@ -4526,7 +4528,6 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
return previous_;
}
-
#undef __
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/ia32/ic-ia32.cc b/src/3rdparty/v8/src/ia32/ic-ia32.cc
index dc64a09..dae3bbd 100644
--- a/src/3rdparty/v8/src/ia32/ic-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/ic-ia32.cc
@@ -747,6 +747,125 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ // eax: value
+ // ecx: key (a smi)
+ // edx: receiver
+ // ebx: FixedArray receiver->elements
+ // edi: receiver map
+ // Fast case: Do the store; the value could be either an Object or a double.
+ __ bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, fast_double);
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(eax, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ // Update write barrier for the elements array address.
+ __ mov(edx, eax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, slow);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
+ &transition_double_elements, false);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &non_double_value,
+ DONT_DO_SMI_CHECK);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+ // and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is not
+ // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+ // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -755,10 +874,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label check_if_double_array, array, extra, transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@@ -785,7 +903,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object_with_map_check);
+ __ j(below, &fast_object);
// Slow case: call runtime.
__ bind(&slow);
@@ -808,18 +926,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &check_if_double_array);
- // Add 1 to receiver->length, and go to common element store code for Objects.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&fast_object_without_map_check);
+ __ jmp(&fast_object_grow);
__ bind(&check_if_double_array);
__ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
- // Add 1 to receiver->length, and go to common element store code for doubles.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -836,94 +948,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- // Fast case: Do the store, could either Object or double.
- __ bind(&fast_object_with_map_check);
- // eax: value
- // ecx: key (a smi)
- // edx: receiver
- // ebx: FixedArray receiver->elements
- // edi: receiver map
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(eax, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- // Update write barrier for the elements array address.
- __ mov(edx, eax); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ bind(&fast_double_without_map_check);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
- &transition_double_elements, false);
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- &slow);
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &slow);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
}
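
These two calls are the heart of the refactor: four near-duplicate fast paths collapse into one emitter parameterized by two enums. Its shape, schematically:

    #include <cstdio>

    enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
    enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };

    void EmitFastStore(KeyedStoreCheckMap check_map,
                       KeyedStoreIncrementLength increment_length) {
      if (check_map == kCheckMap)
        std::puts("  check elements map (in-bounds store)");
      if (increment_length == kIncrementLength)
        std::puts("  length += 1 (store one past the end, 'grow' case)");
      std::puts("  store element / write barrier / return");
    }

    int main() {
      std::puts("in-bounds path:");
      EmitFastStore(kCheckMap, kDontIncrementLength);
      std::puts("grow path:");
      EmitFastStore(kDontCheckMap, kIncrementLength);
    }
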
@@ -943,7 +971,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate* isolate = masm->isolate();
isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
@@ -1622,7 +1650,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
index cf30156..34ce1cd 100644
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
@@ -79,6 +79,10 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone();
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -95,17 +99,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -131,6 +126,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -143,6 +140,8 @@ bool LCodeGen::GeneratePrologue() {
// receiver object). ecx is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
+ Label begin;
+ __ bind(&begin);
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
@@ -151,6 +150,39 @@ bool LCodeGen::GeneratePrologue() {
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
+ ASSERT(!FLAG_age_code ||
+ (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos()));
+ }
+
+
+ if (dynamic_frame_alignment_) {
+ Label begin;
+ __ bind(&begin);
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ ASSERT(!FLAG_age_code ||
+ (kSizeOfOptimizedAlignStackPrologue ==
+ do_not_pad.pos() - begin.pos()));
}
__ push(ebp); // Caller's frame pointer.
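
The padding decision above in plain C++: with 4-byte pointers a double spill slot spans two words, so the prologue aligns esp + 4 to a multiple of 2 * kPointerSize by pushing at most one padding word (and records the choice for the deoptimizer):

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPointerSize = 4;  // ia32

    bool NeedsAlignmentPadding(uintptr_t esp) {
      // The prologue tests esp against kPointerSize: if that bit is already
      // set, esp + 4 is a multiple of 8 and no padding word is pushed.
      return (esp & kPointerSize) == 0;
    }

    int main() {
      std::printf("esp=0x1000 -> pad=%d\n", NeedsAlignmentPadding(0x1000));  // 1
      std::printf("esp=0x1004 -> pad=%d\n", NeedsAlignmentPadding(0x1004));  // 0
    }
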
@@ -158,9 +190,21 @@ bool LCodeGen::GeneratePrologue() {
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ __ test(esp, Immediate(kPointerSize));
+ __ Assert(zero, "frame is expected to be aligned");
+ }
+
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- if (slots > 0) {
+ ASSERT_GE(slots, 1);
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
+ } else {
if (FLAG_debug_code) {
__ mov(Operand(eax), Immediate(slots));
Label loop;
@@ -170,7 +214,7 @@ bool LCodeGen::GeneratePrologue() {
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
+ #ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
@@ -180,7 +224,18 @@ bool LCodeGen::GeneratePrologue() {
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
-#endif
+ #endif
+ }
+
+ // Store dynamic frame alignment state in the first local.
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ edx);
+ } else {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ Immediate(kNoAlignmentPadding));
}
}
@@ -308,24 +363,24 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
@@ -359,7 +414,9 @@ Operand LCodeGen::HighOperand(LOperand* op) {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -367,8 +424,20 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ // Function parameters are arguments to the outermost environment. The
+ // arguments index points to the first element of a sequence of tagged
+ // values on the stack that represent the arguments. This needs to be
+ // kept in sync with the LArgumentsElements implementation.
+ *arguments_index = -environment->parameter_count();
+ *arguments_count = environment->parameter_count();
+
+ WriteTranslation(environment->outer(),
+ translation,
+ arguments_index,
+ arguments_count);
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -376,12 +445,31 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
+
+ // Inlined frames which push their arguments cause the index to be
+ // bumped and another stack area to be used for materialization.
+ if (environment->entry() != NULL &&
+ environment->entry()->arguments_pushed()) {
+ *arguments_index = *arguments_index < 0
+ ? GetStackSlotCount()
+ : *arguments_index + *arguments_count;
+ *arguments_count = environment->entry()->arguments_count() + 1;
+ }
+
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -392,7 +480,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -400,26 +491,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
- false);
+ false,
+ false,
+ *arguments_index,
+ *arguments_count);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject();
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
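
The new uint32 markers exist because the deoptimizer has to rebox untagged slots, and the same 32-bit pattern denotes different numbers depending on signedness, for example:

    #include <cstdint>
    #include <cstdio>

    double ReboxInt32(uint32_t bits)  { return static_cast<int32_t>(bits); }
    double ReboxUint32(uint32_t bits) { return bits; }

    int main() {
      uint32_t bits = 0x80000000u;                         // >= 2^31
      std::printf("as int32:  %.0f\n", ReboxInt32(bits));  // -2147483648
      std::printf("as uint32: %.0f\n", ReboxUint32(bits)); //  2147483648
    }
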
@@ -433,6 +537,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -440,8 +546,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -500,9 +606,9 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
} else if (context->IsConstantOperand()) {
- Handle<Object> literal =
- chunk_->LookupLiteral(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(literal));
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
} else {
UNREACHABLE();
}
@@ -531,20 +637,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
int frame_count = 0;
int jsframe_count = 0;
+ int args_index = 0;
+ int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
- WriteTranslation(environment, &translation);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -566,19 +674,22 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ push(eax);
__ push(ebx);
__ mov(ebx, shared);
- __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
+ __ mov(eax,
+ FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+ __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
+ eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+ __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
+ eax);
__ pop(ebx);
__ pop(eax);
__ popfd();
@@ -618,13 +729,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -639,7 +750,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -684,9 +795,9 @@ void LCodeGen::RecordSafepoint(
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
}
@@ -699,7 +810,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -808,7 +919,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
+ Register dividend = ToRegister(instr->left());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -832,8 +943,8 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&done);
} else {
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->InputAt(0));
- Register right_reg = ToRegister(instr->InputAt(1));
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
ASSERT(left_reg.is(eax));
@@ -863,7 +974,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(less, &remainder_eq_dividend, Label::kNear);
// Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ mov(scratch, right_reg);
__ sub(Operand(scratch), Immediate(1));
__ test(scratch, Operand(right_reg));
@@ -919,11 +1030,11 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->InputAt(1);
+ LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
+ ASSERT(ToRegister(instr->left()).is(eax));
+ ASSERT(!ToRegister(instr->right()).is(eax));
+ ASSERT(!ToRegister(instr->right()).is(edx));
Register left_reg = eax;
@@ -964,12 +1075,115 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ ASSERT(instr->right()->IsConstantOperand());
+
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ Register result = ToRegister(instr->result());
+
+ switch (divisor) {
+ case 0:
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+
+ case 1:
+ __ Move(result, dividend);
+ return;
+
+ case -1:
+ __ Move(result, dividend);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ return;
+ }
+
+ uint32_t divisor_abs = abs(divisor);
+ if (IsPowerOf2(divisor_abs)) {
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (divisor < 0) {
+ // Input[dividend] is clobbered.
+ // The sequence is tedious because neg(dividend) might overflow.
+ __ mov(result, dividend);
+ __ sar(dividend, 31);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ shl(dividend, 32 - power);
+ __ sar(result, power);
+ __ not_(dividend);
+ // Clear result.sign if dividend.sign is set.
+ __ and_(result, dividend);
+ } else {
+ __ Move(result, dividend);
+ __ sar(result, power);
+ }
+ } else {
+ ASSERT(ToRegister(instr->left()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(edx));
+ Register scratch = ToRegister(instr->temp());
+
+ // Find b such that 2^b < divisor_abs < 2^(b+1).
+ unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
+ unsigned shift = 32 + b; // Precision: effectively one extra bit.
+ double multiplier_f =
+ static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
+ int64_t multiplier;
+ if (multiplier_f - floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(floor(multiplier_f));
+ } else {
+ multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ }
+ // The multiplier is a uint32.
+ ASSERT(multiplier > 0 &&
+ multiplier < (static_cast<int64_t>(1) << 32));
+ __ mov(scratch, dividend);
+ if (divisor < 0 &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ mov(edx, static_cast<int32_t>(multiplier));
+ __ imul(edx);
+ if (static_cast<int32_t>(multiplier) < 0) {
+ __ add(edx, scratch);
+ }
+ Register reg_lo = eax;
+ Register reg_byte_scratch = scratch;
+ if (!reg_byte_scratch.is_byte_register()) {
+ __ xchg(reg_lo, reg_byte_scratch);
+ reg_lo = scratch;
+ reg_byte_scratch = eax;
+ }
+ if (divisor < 0) {
+ __ xor_(reg_byte_scratch, reg_byte_scratch);
+ __ cmp(reg_lo, 0x40000000);
+ __ setcc(above, reg_byte_scratch);
+ __ neg(edx);
+ __ sub(edx, reg_byte_scratch);
+ } else {
+ __ xor_(reg_byte_scratch, reg_byte_scratch);
+ __ cmp(reg_lo, 0xC0000000);
+ __ setcc(above_equal, reg_byte_scratch);
+ __ add(edx, reg_byte_scratch);
+ }
+ __ sar(edx, shift - 32);
+ }
+}
+
+
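
The non-power-of-two path of DoMathFloorOfDiv is the classic division-by-invariant-multiplication trick: multiply by a scaled reciprocal, keep the high half, shift. A self-contained unsigned sketch using the round-up multiplier (Granlund/Montgomery style, not V8's exact signed sequence; __builtin_clz and __int128 are GCC/Clang extensions):

    #include <cassert>
    #include <cstdint>

    struct MagicDivider {
      uint64_t multiplier;
      unsigned shift;
    };

    MagicDivider MakeDivider(uint32_t d) {
      assert(d > 1 && (d & (d - 1)) != 0);  // non-trivial, not a power of two
      unsigned b = 31 - __builtin_clz(d);   // 2^b < d < 2^(b+1)
      unsigned shift = 32 + b + 1;          // 32 + ceil(log2 d)
      // m = floor(2^shift / d) + 1: rounding up makes the quotient exact for
      // every 32-bit dividend.
      uint64_t m = ((static_cast<uint64_t>(1) << shift) / d) + 1;
      return {m, shift};
    }

    uint32_t DivideByMagic(uint32_t n, MagicDivider magic) {
      // 32x64-bit product, then take the high bits; on ia32 this is one mul
      // plus an arithmetic shift of edx.
      unsigned __int128 product =
          static_cast<unsigned __int128>(n) * magic.multiplier;
      return static_cast<uint32_t>(product >> magic.shift);
    }

    int main() {
      MagicDivider by7 = MakeDivider(7);
      for (uint32_t n = 0; n < 1000000; ++n)
        assert(DivideByMagic(n, by7) == n / 7);
    }
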
void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->TempAt(0)), left);
+ __ mov(ToRegister(instr->temp()), left);
}
if (right->IsConstantOperand()) {
@@ -1030,12 +1244,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ test(left, Operand(left));
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
+ if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
+ } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+ __ cmp(ToRegister(instr->temp()), Immediate(0));
+ DeoptimizeIf(less, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
+ __ or_(ToRegister(instr->temp()), ToOperand(right));
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
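
The refined minus-zero check above works because multiplying by a constant 0 only needs a bailout when the other operand is negative: in JS doubles the sign of the zero survives, which an int32 product cannot express:

    #include <cmath>
    #include <cstdio>

    int main() {
      double pos = 3.0 * 0.0;   // +0
      double neg = -3.0 * 0.0;  // -0: an int32 result would lose the sign
      std::printf("signbit(3*0)=%d signbit(-3*0)=%d\n",
                  (int)std::signbit(pos), (int)std::signbit(neg));
    }
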
@@ -1044,8 +1261,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
@@ -1085,14 +1302,21 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
ASSERT(ToRegister(right).is(ecx));
switch (instr->op()) {
+ case Token::ROR:
+ __ ror_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ test(ToRegister(left), Immediate(0x80000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ break;
case Token::SAR:
__ sar_cl(ToRegister(left));
break;
@@ -1114,6 +1338,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ test(ToRegister(left), Immediate(0x80000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ ror(ToRegister(left), shift_count);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sar(ToRegister(left), shift_count);
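
Token::ROR is new in this patch; it lowers a matched rotate pattern, (x >>> n) | (x << (32 - n)), to a single ror, deoptimizing when a zero count would leave a top-bit-set value that must fit a signed int32. The operation itself, as a sketch:

    #include <cassert>
    #include <cstdint>

    uint32_t RotateRight32(uint32_t x, unsigned count) {
      count &= 31;               // ia32 masks the count to 5 bits
      if (count == 0) return x;  // avoid undefined behaviour in the shift below
      return (x >> count) | (x << (32 - count));
    }

    int main() {
      assert(RotateRight32(0x80000001u, 1) == 0xC0000000u);
      assert(RotateRight32(0x12345678u, 0) == 0x12345678u);
    }
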
@@ -1141,8 +1373,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1171,7 +1403,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (BitCast<uint64_t, double>(v) == 0) {
__ xorps(res, res);
} else {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
@@ -1214,7 +1446,7 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
@@ -1222,14 +1454,21 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
void LCodeGen::DoFixedArrayBaseLength(
LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ mov(result, FieldOperand(input, HeapObject::kMapOffset));
@@ -1243,9 +1482,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
ASSERT(input.is(result));
Label done;
@@ -1262,19 +1501,18 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(result));
ASSERT(object.is(eax));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ test(object, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1299,7 +1537,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
@@ -1318,8 +1556,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1334,9 +1572,70 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ if (right->IsConstantOperand()) {
+ Operand left_op = ToOperand(left);
+ Immediate right_imm = ToInteger32Immediate(right);
+ __ cmp(left_op, right_imm);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_op, right_imm);
+ } else {
+ Register left_reg = ToRegister(left);
+ Operand right_op = ToOperand(right);
+ __ cmp(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ mov(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+ XMMRegister left_reg = ToDoubleRegister(left);
+ XMMRegister right_reg = ToDoubleRegister(right);
+ __ ucomisd(left_reg, right_reg);
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(left_reg, xmm_scratch);
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ orpd(left_reg, right_reg);
+ } else {
+ // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+ __ addsd(left_reg, right_reg);
+ }
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ ucomisd(left_reg, left_reg); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+ __ bind(&return_right);
+ __ movsd(left_reg, right_reg);
+
+ __ bind(&return_left);
+ }
+}
+
+
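
The double path of DoMathMinMax encodes two JS quirks: NaN on either side propagates (left preferred), and min(+0, -0) is -0 while max(-0, +0) is +0, which the stub gets from orpd/addsd on the zero operands. Equivalent scalar logic, as a sketch:

    #include <cassert>
    #include <cmath>

    double JsMin(double a, double b) {
      if (std::isnan(a)) return a;  // NaN wins, left preferred
      if (std::isnan(b)) return b;
      if (a == b) {
        if (a != 0.0) return a;     // equal and nonzero: either will do
        // Both are +/-0: OR the sign bits, as orpd does; -0 if either is -0.
        return (std::signbit(a) || std::signbit(b)) ? -0.0 : 0.0;
      }
      return a < b ? a : b;
    }

    double JsMax(double a, double b) {
      if (std::isnan(a)) return a;
      if (std::isnan(b)) return b;
      if (a == b) {
        if (a != 0.0) return a;
        return a + b;  // addsd: +0 unless both operands are -0
      }
      return a > b ? a : b;
    }

    int main() {
      assert(std::signbit(JsMin(0.0, -0.0)));   // min(+0, -0) == -0
      assert(!std::signbit(JsMax(-0.0, 0.0)));  // max(-0, +0) == +0
      assert(std::isnan(JsMin(NAN, 1.0)));      // NaN propagates
    }
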
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->InputAt(0));
- XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
// Modulo uses a fixed result register.
ASSERT(instr->op() == Token::MOD || left.is(result));
@@ -1422,17 +1721,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ cmp(reg, factory()->true_value());
@@ -1480,7 +1779,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register map = no_reg; // Keep the compiler happy.
if (expected.NeedsMap()) {
- map = ToRegister(instr->TempAt(0));
+ map = ToRegister(instr->temp());
ASSERT(!map.is(reg));
__ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
@@ -1573,8 +1872,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -1610,8 +1909,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Operand right = ToOperand(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1621,7 +1920,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1631,7 +1930,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -1661,7 +1960,7 @@ void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
@@ -1694,8 +1993,8 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1720,8 +2019,8 @@ Condition LCodeGen::EmitIsString(Register input,
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1734,7 +2033,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->InputAt(0));
+ Operand input = ToOperand(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1745,8 +2044,8 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1816,8 +2115,8 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1832,12 +2131,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AssertString(input);
__ mov(result, FieldOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -1846,7 +2143,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1923,9 +2220,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
@@ -1942,7 +2239,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -1985,11 +2282,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(1));
- Register temp = ToRegister(instr->TempAt(0));
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -1998,7 +2295,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
Label cache_miss;
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<JSGlobalPropertyCell> cache_cell =
@@ -2049,7 +2346,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// register which is pushed last by PushSafepointRegisters as top of the
// stack is used to pass the offset to the location of the map check to
// the stub.
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 13;
@@ -2098,8 +2395,25 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (dynamic_frame_alignment_) {
+ // Fetch the state of the dynamic frame alignment.
+ __ mov(edx, Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+ }
__ mov(esp, ebp);
__ pop(ebp);
+ if (dynamic_frame_alignment_) {
+ Label no_padding;
+ __ cmp(edx, Immediate(kNoAlignmentPadding));
+ __ j(equal, &no_padding);
+ if (FLAG_debug_code) {
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+ Immediate(kAlignmentZapValue));
+ __ Assert(equal, "expected alignment marker");
+ }
+ __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+ __ bind(&no_padding);
+ }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
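
For orientation, a sketch of the stack this epilogue sees once ebp has been popped (slot order per the ia32 JavaScript frame layout; treat the exact constants as an assumption here):

//   esp + 0                        return address
//   esp + kPointerSize ...         the N parameters plus the receiver
//   esp + (N + 2) * kPointerSize   padding word (kAlignmentZapValue)
// Ret((N + 2) * kPointerSize, ecx) therefore drops the padding as well,
// where N == GetParameterCount().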
@@ -2199,7 +2513,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context,
offset,
@@ -2229,12 +2543,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2246,9 +2560,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Handle<Map>(current->map()));
+ DeoptimizeIf(not_equal, env);
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
+ __ mov(result, factory()->undefined_value());
}
}
@@ -2270,6 +2598,22 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
+// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
+// prototype chain, which causes unbounded code generation.
+static bool CompactEmit(SmallMapList* list,
+ Handle<String> name,
+ int i,
+ Isolate* isolate) {
+ Handle<Map> map = list->at(i);
+ // If the map has ElementsKind transitions, we will generate map checks
+ // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
+ if (map->HasElementsTransition()) return false;
+ LookupResult lookup(isolate);
+ map->LookupDescriptor(NULL, *name, &lookup);
+ return lookup.IsField() || lookup.IsConstantFunction();
+}
+
+
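// Label::kNear emits a rel8 jump (at most 127 bytes forward), and a negative
// lookup that walks the prototype chain can emit far more code than that
// between the branch and its target; CompactEmit is what lets the loop below
// keep the short encoding when every case is known to stay small.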
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
@@ -2283,18 +2627,32 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
}
Handle<String> name = instr->hydrogen()->name();
Label done;
+ bool all_are_compact = true;
+ for (int i = 0; i < map_count; ++i) {
+ if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
+ all_are_compact = false;
+ break;
+ }
+ }
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ Label check_passed;
+ __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ j(not_equal, &next, Label::kNear);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ jmp(&done, Label::kNear);
+ bool compact = all_are_compact ? true :
+ CompactEmit(instr->hydrogen()->types(), name, i, isolate());
+ __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
@@ -2320,7 +2678,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
// Check that the function really is a function.
@@ -2362,7 +2720,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, ok, fail;
@@ -2378,8 +2736,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, FAST_ELEMENTS);
- __ j(equal, &ok, Label::kNear);
+ __ cmp(temp, GetInitialFastElementsKind());
+ __ j(less, &fail, Label::kNear);
+ __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
+ __ j(less_equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
@@ -2396,7 +2756,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ mov(result, FieldOperand(input,
ExternalArray::kExternalPointerOffset));
}
@@ -2407,67 +2767,153 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register length = ToRegister(instr->length());
Operand index = ToOperand(instr->index());
Register result = ToRegister(instr->result());
-
- __ sub(length, index);
- DeoptimizeIf(below_equal, instr->environment());
-
// There are two words between the frame pointer and the last argument.
// Subtracting index from length accounts for one of them; the kPointerSize
// displacement in the operand below adds the other.
+ __ sub(length, index);
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ if (ExternalArrayOpRequiresTemp<HLoadKeyed>(instr->hydrogen())) {
+ __ SmiUntag(ToRegister(instr->key()));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ movsx_b(result, operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movzx_b(result, operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ movsx_w(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzx_w(result, operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ mov(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ mov(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ test(result, Operand(result));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ offset,
+ instr->additional_index());
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+ __ movdbl(result, double_load_operand);
+}
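
// The hole in a fast double array is a NaN with a reserved upper word
// (kHoleNanUpper32); ordinary NaNs are canonicalized to a different bit
// pattern on store, which is why comparing only the high 32 bits suffices.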
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
// Load the result.
__ mov(result,
BuildFastArrayOperand(instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
}
}
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- XMMRegister result = ToDoubleRegister(instr->result());
-
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(result, double_load_operand);
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
}
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_representation.IsTagged() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
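+  // A tagged smi key on ia32 is the value shifted left by kSmiTagSize (1), so
+  // it already carries one factor of two; dropping one shift from the element
+  // scale keeps base + (key << shift_size) addressing the intended element.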
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
@@ -2486,61 +2932,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ mov(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(result, operand);
- __ test(result, Operand(result));
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -2581,7 +2972,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->InputAt(0));
+ Operand elem = ToOperand(instr->elements());
Register result = ToRegister(instr->result());
Label done;
@@ -2605,7 +2996,7 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2618,12 +3009,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok);
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -2643,7 +3034,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// if it's better to use it than to explicitly fetch it from the context
// here.
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
@@ -2693,7 +3084,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
}
@@ -2705,7 +3096,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2724,7 +3115,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(esi));
+ ASSERT(ToRegister(instr->context()).is(esi));
__ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
@@ -2735,7 +3126,10 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX)));
+ __ mov(result,
+ Operand(context, Context::SlotOffset(instr->qml_global()
+ ? Context::QML_GLOBAL_OBJECT_INDEX
+ : Context::GLOBAL_OBJECT_INDEX)));
}
@@ -2762,17 +3156,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(edi, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
-
- if (change_context) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- } else {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
+ // Change context.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Set eax to arguments count if adaption is not needed. Assumes that eax
// is available to write to at this point.
@@ -2901,7 +3286,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -2935,8 +3320,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
} else {
- Label negative_sign;
- Label done;
+ Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@@ -2962,9 +3346,9 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DeoptimizeIf(equal, instr->environment());
__ jmp(&done, Label::kNear);
- // Non-zero negative reaches here
+ // Non-zero negative reaches here.
__ bind(&negative_sign);
- // Truncate, then compare and compensate
+ // Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
@@ -3066,11 +3450,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(xmm1));
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(eax));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(xmm1));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(eax));
+ ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsTagged()) {
@@ -3103,21 +3487,21 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ ASSERT(ToRegister(instr->global_object()).is(eax));
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the global context's random seeds
+ // ebx: FixedArray of the native context's random seeds
// Load state[0].
__ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
@@ -3362,7 +3746,22 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
int offset = instr->offset();
if (!instr->transition().is_null()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ } else {
+ Register temp = ToRegister(instr->temp());
+ Register temp_map = ToRegister(instr->temp_map());
+ __ mov(temp_map, instr->transition());
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ temp_map,
+ temp,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
}
// Do the store.
@@ -3372,7 +3771,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
@@ -3383,7 +3782,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
check_needed);
}
} else {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -3414,10 +3813,36 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ test(ToRegister(operand), Immediate(kSmiTagMask));
+ } else {
+ __ test(ToOperand(operand), Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(not_zero, environment);
+ }
+}
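
A minimal sketch of the emitted test, assuming ia32 tagging where kSmiTagMask is 1 and smis keep the low bit clear:

#include <cstdint>

// Hypothetical stand-in for the test/DeoptimizeIf pair above.
bool TaggedWordIsHeapObject(uint32_t word) {
  return (word & 1u) != 0;  // low tag bit set: heap object, so deoptimize
}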
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
if (instr->index()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ cmp(ToOperand(instr->length()),
+ Immediate(Smi::FromInt(constant_index)));
+ } else {
+ __ cmp(ToOperand(instr->length()), Immediate(constant_index));
+ }
DeoptimizeIf(below_equal, instr->environment());
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
@@ -3426,14 +3851,18 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
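
When the length is tagged, the constant index has to be smi-encoded before the unsigned compare; a sketch of that encoding, assuming ia32's one-bit smi tag:

#include <cstdint>

// Smi::FromInt on ia32 reduces to a left shift by the tag size.
int32_t SmiEncode(int32_t i) { return i << 1; }  // valid for |i| < 2^30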
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+ if (ExternalArrayOpRequiresTemp<HStoreKeyed>(instr->hydrogen())) {
+ __ SmiUntag(ToRegister(instr->key()));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@@ -3457,9 +3886,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3469,14 +3901,41 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
+
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
+ }
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+ __ movdbl(double_store_operand, value);
+}
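
A sketch of the canonicalization step, using the self-compare NaN test (only NaN is unordered with itself); quiet_NaN() here stands in for the runtime's canonical_non_hole_nan constant:

#include <limits>

double CanonicalizeForStore(double v) {
  // Collapse every NaN payload to one fixed pattern so a stored NaN can
  // never alias the hole's reserved NaN encoding.
  return v != v ? std::numeric_limits<double>::quiet_NaN() : v;
}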
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
+ Register elements = ToRegister(instr->elements());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Operand operand = BuildFastArrayOperand(
- instr->object(),
+ instr->elements(),
instr->key(),
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
@@ -3499,29 +3958,15 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // Dispatch on the elements kind: external array, fast double, or fast.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
}
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(double_store_operand, value);
}
@@ -3540,7 +3985,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
@@ -3548,26 +3993,35 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
ElementsKind to_kind = to_map->elements_kind();
Label not_applicable;
+ bool is_simple_map_transition =
+ IsSimpleMapChangeTransition(from_kind, to_kind);
+ Label::Distance branch_distance =
+ is_simple_map_transition ? Label::kNear : Label::kFar;
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable);
- __ mov(new_map_reg, to_map);
- if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ __ j(not_equal, &not_applicable, branch_distance);
+ if (is_simple_map_transition) {
Register object_reg = ToRegister(instr->object());
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ Handle<Map> map = instr->hydrogen()->transitioned_map();
+ __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+ Immediate(map));
// Write barrier.
- ASSERT_NE(instr->temp_reg(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp_reg()), kDontSaveFPRegs);
- } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ASSERT_NE(instr->temp(), NULL);
+ __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+ ToRegister(instr->temp()),
+ kDontSaveFPRegs);
+ } else if (IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) {
+ __ mov(new_map_reg, to_map);
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
+ __ mov(new_map_reg, to_map);
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
@@ -3592,7 +4046,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -3628,9 +4082,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
instr, instr->context());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(eax);
- }
+ __ AssertSmi(eax);
__ SmiUntag(eax);
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -3648,7 +4100,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3700,7 +4152,7 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -3708,43 +4160,89 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ LOperand* temp = instr->temp();
+
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmp(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ SmiTag(reg);
+ __ bind(deferred->exit());
+}
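
A sketch of the guard above, assuming ia32's 31-bit smis, where Smi::kMaxValue is 2^30 - 1:

#include <cstdint>

// Values above Smi::kMaxValue cannot be tagged and need a heap number.
bool Uint32FitsInSmi(uint32_t v) { return v <= (1u << 30) - 1u; }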
+
+
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ __ SmiUntag(reg);
+ __ xor_(reg, 0x80000000);
+ __ cvtsi2sd(xmm0, Operand(reg));
+ } else {
+ __ LoadUint32(xmm0, reg, xmm1);
+ }
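+  // Sketch of the signed path above, assuming arithmetic right shifts: SmiTag
+  // is reg <<= 1, and the overflow means bits 30 and 31 of the original value
+  // disagreed, so after (tagged >> 1) only bit 31 is wrong (it holds the old
+  // bit 30, i.e. the complement); the xor with 0x80000000 flips it back:
+  //   int32_t v = (tagged >> 1) ^ static_cast<int32_t>(0x80000000u);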
+
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, &slow);
__ jmp(&done, Label::kNear);
@@ -3787,11 +4285,11 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->TempAt(0));
+ Register tmp = ToRegister(instr->temp());
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
} else {
@@ -3824,7 +4322,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
@@ -3832,11 +4330,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ AssertSmi(ToRegister(input));
}
__ SmiUntag(ToRegister(input));
}
@@ -3897,7 +4397,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3938,7 +4438,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cmp(input_reg, 0x80000000u);
@@ -3955,7 +4455,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(not_equal, instr->environment());
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
@@ -3985,13 +4485,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4004,9 +4504,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
- LOperand* temp = instr->TempAt(0);
+ LOperand* temp = instr->temp();
ASSERT(temp == NULL || temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -4028,7 +4528,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
@@ -4066,7 +4566,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ bind(&done);
} else {
Label done;
- Register temp_reg = ToRegister(instr->TempAt(0));
+ Register temp_reg = ToRegister(instr->temp());
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
@@ -4145,22 +4645,22 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
@@ -4230,7 +4730,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -4296,7 +4796,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->TempAt(0));
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4332,10 +4833,11 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4368,7 +4870,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
- __ AbortIfSmi(map);
+ __ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
instance_size >> kPointerSizeLog2);
__ Assert(equal, "Unexpected instance size");
@@ -4420,14 +4922,15 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
@@ -4440,12 +4943,11 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
// Set up the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ push(Immediate(isolate()->factory()->empty_fixed_array()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4549,8 +5051,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedDoubleArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(FieldOperand(result, total_offset), Immediate(value_low));
@@ -4589,8 +5091,9 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
- // been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
@@ -4653,7 +5156,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ ASSERT(ToRegister(instr->value()).is(eax));
__ push(eax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -4663,15 +5166,13 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Label materialized;
// Registers will be used as follows:
- // edi = JS function.
// ecx = literals array.
// ebx = regexp literal.
// eax = regexp literal clone.
// esi = context.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(ecx, instr->hydrogen()->literals());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, factory()->undefined_value());
__ j(not_equal, &materialized, Label::kNear);
@@ -4735,14 +5236,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(1);
+ LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -4826,7 +5327,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -4945,7 +5446,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -5027,11 +5528,20 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ j(not_equal, &load_cache);
+ __ mov(result, isolate()->factory()->empty_fixed_array());
+ __ jmp(&done);
+
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ mov(result,
- FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
__ test(result, result);
DeoptimizeIf(equal, instr->environment());
}
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
index 392bca2..44ddaff 100644
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
@@ -47,20 +47,24 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- deferred_(8),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -72,6 +76,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
@@ -100,7 +105,12 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -119,7 +129,10 @@ class LCodeGen BASE_EMBEDDED {
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void WriteTranslation(LEnvironment* environment,
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count);
void EnsureRelocSpaceForDeoptimization();
@@ -145,7 +158,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -161,10 +174,10 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -228,7 +241,10 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -241,6 +257,7 @@ class LCodeGen BASE_EMBEDDED {
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index = 0);
@@ -278,6 +295,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -308,7 +329,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -318,12 +340,19 @@ class LCodeGen BASE_EMBEDDED {
int* offset);
void EnsureSpaceForLazyDeopt();
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- LChunk* const chunk_;
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -337,6 +366,7 @@ class LCodeGen BASE_EMBEDDED {
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
+ bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 510d9f1..6428916 100644
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -37,7 +37,7 @@ namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
- moves_(32),
+ moves_(32, owner->zone()),
source_uses_(),
destination_uses_(),
spilled_register_(-1) {}
@@ -157,7 +157,7 @@ void LGapResolver::AddMove(LMoveOperands move) {
LOperand* destination = move.destination();
if (destination->IsRegister()) ++destination_uses_[destination->index()];
- moves_.Add(move);
+ moves_.Add(move, cgen_->zone());
}
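
The common thread in these hunks is that the compilation Zone is now passed explicitly into every call that can allocate. A minimal standalone sketch of that arena-threading pattern, with illustrative types rather than V8's own:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy bump-pointer arena; everything it hands out dies with the arena.
    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~static_cast<size_t>(7);  // Keep 8-byte alignment.
        assert(used_ + size <= sizeof(buffer_));
        void* result = buffer_ + used_;
        used_ += size;
        return result;
      }
     private:
      alignas(8) uint8_t buffer_[1 << 16];
      size_t used_ = 0;
    };

    // Growable list that takes the zone wherever it may allocate, mirroring
    // moves_(32, owner->zone()) and moves_.Add(move, cgen_->zone()) above.
    template <typename T>
    class ZoneList {
     public:
      ZoneList(int capacity, Zone* zone)
          : data_(static_cast<T*>(zone->New(capacity * sizeof(T)))),
            capacity_(capacity) {}
      void Add(const T& value, Zone* zone) {
        if (length_ == capacity_) Grow(zone);
        data_[length_++] = value;
      }
      int length() const { return length_; }
     private:
      void Grow(Zone* zone) {
        capacity_ *= 2;
        T* new_data = static_cast<T*>(zone->New(capacity_ * sizeof(T)));
        for (int i = 0; i < length_; ++i) new_data[i] = data_[i];
        data_ = new_data;  // The old block is simply abandoned in the zone.
      }
      T* data_;
      int capacity_;
      int length_ = 0;
    };

    int main() {
      Zone zone;
      ZoneList<int> moves(4, &zone);
      for (int i = 0; i < 40; ++i) moves.Add(i, &zone);
      return moves.length() == 40 ? 0 : 1;
    }
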
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
index fb408a1..dcc5b77 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
@@ -179,6 +179,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -196,22 +197,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -220,57 +221,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(1)->PrintTo(stream);
- InputAt(2)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -280,7 +281,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -294,34 +295,34 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LMathPowHalf::PrintDataTo(StringStream* stream) {
stream->Add("/pow_half ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add(" ");
- InputAt(1)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -350,7 +351,9 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -366,55 +369,23 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (is_double) {
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ }
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
+ return LStackSlot::Create(index, zone());
}
}
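
Together with the alignment-state slot reserved at index 0 in LChunkBuilder::Build() further down, the new `spill_slot_count_ |= 1` forces every double-width request to an odd index, so the skipped slot and the returned one form an 8-byte-aligned pair (assuming the frame base itself is 8-byte aligned). A small replay of the index arithmetic, not V8 code:

    #include <cassert>

    struct SpillAllocator {
      int spill_slot_count = 0;
      int num_double_slots = 0;
      int GetNextSpillIndex(bool is_double) {
        if (is_double) {
          spill_slot_count++;      // Skip a slot for the double's second word.
          spill_slot_count |= 1;   // Bump to an odd index if needed.
          num_double_slots++;
        }
        return spill_slot_count++;
      }
    };

    int main() {
      SpillAllocator a;
      assert(a.GetNextSpillIndex(false) == 0);  // Reserved alignment slot.
      assert(a.GetNextSpillIndex(true) == 3);   // Pair (2, 3); slot 1 is padding.
      assert(a.GetNextSpillIndex(true) == 5);   // Pair (4, 5); no padding needed.
      assert(a.GetNextSpillIndex(false) == 6);  // Singles pack normally again.
      return 0;
    }
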
@@ -437,16 +408,7 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -470,85 +432,17 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // Reserve the first spill slot for the state of dynamic alignment.
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -561,17 +455,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
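
The new Abort() records why compilation bailed out instead of formatting a message at the call site; presumably the stored reason is reported later from the CompilationInfo. A minimal sketch of that record-and-fail-late pattern (illustrative types, not V8's):

    #include <cstdio>

    struct CompilationInfo {
      const char* bailout_reason = nullptr;
      void set_bailout_reason(const char* reason) { bailout_reason = reason; }
    };

    struct Builder {
      explicit Builder(CompilationInfo* info) : info_(info) {}
      void Abort(const char* reason) {
        info_->set_bailout_reason(reason);  // Record; no printing here.
        aborted_ = true;
      }
      bool aborted() const { return aborted_; }
     private:
      CompilationInfo* info_;
      bool aborted_ = false;
    };

    int main() {
      CompilationInfo info;
      Builder builder(&info);
      builder.Abort("unsupported construct");  // Hypothetical reason string.
      if (builder.aborted()) printf("bailout: %s\n", info.bailout_reason);
      return 0;
    }
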
@@ -742,7 +627,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -764,7 +649,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -840,13 +725,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
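
Only a logical shift by zero can leave a value outside int32 range, which is why it is the sole deopt candidate here; with FLAG_opt_safe_uint32_operations set, the environment is needed only when the result is not already tracked as uint32. A standalone illustration of the boundary case:

    #include <cstdint>
    #include <cstdio>

    // A logical shift by any nonzero amount lands in [0, 2^31), but a shift
    // by zero passes the full unsigned value through, which may not be
    // representable as an int32 -- the case that forces a deopt environment.
    int main() {
      uint32_t bits = 0xffffffffu;  // JS: (-1 >>> shift)
      const int shifts[] = {1, 0};
      for (int shift : shifts) {
        uint32_t result = bits >> shift;
        printf(">>> %d gives %u (int32-representable: %s)\n", shift,
               static_cast<unsigned>(result),
               result <= 0x7fffffffu ? "yes" : "no");
      }
      return 0;
    }
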
@@ -980,8 +868,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result =
@@ -991,7 +879,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->parameter_count(),
argument_count_,
value_count,
- outer);
+ outer,
+ hydrogen_env->entry(),
+ zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1005,7 +895,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1269,6 +1161,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1335,12 +1232,57 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
return NULL;
}
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
+ LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
+ int32_t divisor_si = HConstant::cast(right)->Integer32Value();
+ if (divisor_si == 0) {
+ LOperand* dividend = UseRegister(instr->left());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
+ } else if (IsPowerOf2(abs(divisor_si))) {
+    // Use the dividend as a temp if divisor < 0 && divisor != -1.
+ LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
+ UseRegisterAtStart(instr->left());
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ } else {
+    // Needs edx:eax, plus a temp register.
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineFixed(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ }
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
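
The IsPowerOf2 branch above works because an arithmetic right shift rounds toward negative infinity, which is exactly the flooring-division semantics this instruction implements, so no idiv (and no fixed edx:eax pair) is needed. A sketch of that arithmetic for positive power-of-two divisors (negative divisors additionally get the environment assigned above):

    #include <cassert>

    // Relies on arithmetic >> for signed values, which ia32 compilers provide.
    int FloorDivByPowerOfTwo(int dividend, int shift) {
      return dividend >> shift;
    }

    int main() {
      assert(FloorDivByPowerOfTwo(7, 1) == 3);    // floor(3.5)
      assert(FloorDivByPowerOfTwo(-7, 1) == -4);  // floor(-3.5), not trunc's -3
      assert(FloorDivByPowerOfTwo(-1, 2) == -1);  // floor(-0.25)
      return 0;
    }
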
@@ -1449,6 +1391,26 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1544,7 +1506,7 @@ LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
- return new LIsStringAndBranch(UseRegister(instr->value()), temp);
+ return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
}
@@ -1570,7 +1532,7 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LStringCompareAndBranch* result = new
+ LStringCompareAndBranch* result = new(zone())
LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
@@ -1625,6 +1587,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1642,7 +1610,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1695,8 +1663,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
(truncating && CpuFeatures::IsSupported(SSE3))
@@ -1704,8 +1673,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
: FixedTemp(xmm1);
LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -1733,14 +1700,24 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
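
The uint32-to-double path above reserves an xmm temp, presumably because the scalar convert instruction treats its operand as signed: values with the top bit set come out negative and must be fixed up by adding 2^32. The C equivalent of that two-step conversion (two's-complement narrowing assumed):

    #include <cassert>
    #include <cstdint>

    double Uint32ToDouble(uint32_t value) {
      // Step 1: signed convert (what cvtsi2sd does with a 32-bit operand).
      double result = static_cast<double>(static_cast<int32_t>(value));
      // Step 2: fix-up for values with the sign bit set.
      if (static_cast<int32_t>(value) < 0) result += 4294967296.0;  // +2^32
      return result;
    }

    int main() {
      assert(Uint32ToDouble(1u) == 1.0);
      assert(Uint32ToDouble(0x80000000u) == 2147483648.0);
      assert(Uint32ToDouble(0xffffffffu) == 4294967295.0);
      return 0;
    }
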
@@ -1763,9 +1740,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
+ LUnallocated* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
+ return AssignEnvironment(Define(result, temp));
}
@@ -1799,7 +1776,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
if (input_rep.IsDouble()) {
LOperand* reg = UseRegister(value);
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
} else if (input_rep.IsInteger32()) {
LOperand* reg = UseFixed(value, eax);
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
@@ -1952,51 +1929,35 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LOperand* key = instr->is_external() &&
+ ExternalArrayOpRequiresTemp<HLoadKeyed>(instr)
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+#ifdef DEBUG
+ if (instr->is_external()) {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ }
+#endif
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ LLoadKeyed* result = new(zone()) LLoadKeyed(elements, key);
+ DefineAsRegister(result);
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt; make sure
// it has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS)
- ? AssignEnvironment(load_instr)
- : load_instr;
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
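
The merged can_deoptimize flag covers two distinct hazards: a fast-elements load may observe the hole sentinel, and a Uint32Array load may observe a value with no int32 representation. For double arrays the hole is a designated NaN bit pattern, so a plain NaN comparison cannot detect it; a sketch with an illustrative bit pattern (not necessarily V8's exact constant):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const uint64_t kHoleBits = 0x7ff7ffffffffffffULL;  // Illustrative hole NaN.
      double slot;
      std::memcpy(&slot, &kHoleBits, sizeof(slot));
      uint64_t bits;
      std::memcpy(&bits, &slot, sizeof(bits));
      // Only a bit-exact compare distinguishes the hole from an ordinary NaN.
      printf("is NaN: %d, is hole: %d\n", slot != slot, bits == kHoleBits);
      return 0;
    }
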
@@ -2011,66 +1972,58 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* val = NULL;
- if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // We need a byte register in this case for the value.
- val = UseFixed(instr->value(), eax);
+ LOperand* elements;
+ LOperand* val;
+ LOperand* key;
+
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ if (instr->NeedsWriteBarrier() &&
+ !IsFastDoubleElementsKind(elements_kind)) {
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ elements = UseRegister(instr->elements());
+ } else {
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ elements = UseRegisterAtStart(instr->elements());
+ }
} else {
- val = UseRegister(instr->value());
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+
+ if (ExternalArrayOpRequiresTemp<HStoreKeyed>(instr)) {
+ key = UseTempRegister(instr->key());
+ elements = UseRegister(instr->elements());
+ } else {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ elements = UseRegisterAtStart(instr->elements());
+ }
+
+ // Determine if we need a byte register in this case for the value.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+ val = val_is_fixed_register
+ ? UseFixed(instr->value(), eax)
+ : UseRegister(instr->value());
}
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
+ ASSERT(result != NULL);
+ return result;
}
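
The UseFixed(instr->value(), eax) for byte-sized element kinds is an ia32 register-file constraint: only eax, ebx, ecx and edx have 8-bit subregisters (al, bl, cl, dl), so the value being stored must land in one of them, and pinning eax is the simple way to guarantee that. What actually reaches the backing store is just the low byte:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t value = 0x1234;
      uint8_t backing_store[1];
      backing_store[0] = static_cast<uint8_t>(value);  // e.g. mov [mem], al
      printf("stored 0x%02x\n", backing_store[0]);     // 0x34
      return 0;
    }
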
@@ -2092,8 +2045,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ ElementsKind from_kind = instr->original_map()->elements_kind();
+ ElementsKind to_kind = instr->transitioned_map()->elements_kind();
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
@@ -2115,6 +2069,8 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
@@ -2122,7 +2078,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
- obj = UseRegisterAtStart(instr->object());
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier
@@ -2131,11 +2089,13 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister()
- : NULL;
+ LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
}
@@ -2237,6 +2197,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2277,12 +2238,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2330,7 +2289,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ if (!pending_deoptimization_ast_id_.IsNone()) {
ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
@@ -2339,7 +2298,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2368,10 +2327,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2383,7 +2343,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* env = current_block_->last_environment();
- if (instr->arguments_pushed()) {
+ if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
index 16b5610..a1adb01 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -109,7 +110,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -119,18 +119,20 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastElement) \
- V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -148,15 +150,14 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -252,11 +253,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -265,6 +261,15 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -284,16 +289,18 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
@@ -327,8 +334,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -411,11 +420,11 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
- LOperand* context() { return inputs_[0]; }
-
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
@@ -455,10 +464,11 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
};
@@ -474,12 +484,12 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
inputs_[3] = elements;
}
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* length() { return inputs_[2]; }
LOperand* elements() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
};
@@ -491,12 +501,12 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
LOperand* arguments() { return inputs_[0]; }
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
virtual void PrintDataTo(StringStream* stream);
};
@@ -507,6 +517,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -526,6 +538,10 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -539,11 +555,33 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -552,6 +590,10 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -564,6 +606,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
@@ -619,6 +664,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
};
@@ -630,6 +678,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -643,6 +693,9 @@ class LIsNilAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -660,6 +713,9 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
virtual void PrintDataTo(StringStream* stream);
@@ -673,6 +729,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
virtual void PrintDataTo(StringStream* stream);
@@ -685,6 +744,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -699,6 +760,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
@@ -714,6 +778,9 @@ class LStringCompareAndBranch: public LControlInstruction<3, 0> {
inputs_[2] = right;
}
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -731,6 +798,9 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -745,6 +815,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -756,8 +828,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
+
virtual void PrintDataTo(StringStream* stream);
};
@@ -768,6 +843,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
@@ -781,6 +858,10 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -812,9 +893,9 @@ class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-
LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -826,6 +907,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -854,6 +938,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -864,10 +949,13 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- Token::Value op() const { return hydrogen()->op(); }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ Token::Value op() const { return hydrogen()->op(); }
};
@@ -879,12 +967,14 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- Token::Value op() const { return op_; }
-
- bool can_deopt() const { return can_deopt_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+ Token::Value op() const { return op_; }
+ bool can_deopt() const { return can_deopt_; }
+
private:
Token::Value op_;
bool can_deopt_;
@@ -898,6 +988,9 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -918,6 +1011,8 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -936,11 +1031,14 @@ class LConstantT: public LTemplateInstruction<1, 0, 0> {
class LBranch: public LControlInstruction<1, 1> {
public:
- explicit LBranch(LOperand* value, LOperand* temp) {
+ LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -954,6 +1052,8 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -975,6 +1075,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -986,18 +1088,34 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -1010,6 +1128,9 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1023,6 +1144,9 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
DECLARE_HYDROGEN_ACCESSOR(DateField)
@@ -1053,6 +1177,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1064,11 +1190,29 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1076,6 +1220,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1087,6 +1234,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1100,6 +1249,9 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return op_; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
@@ -1123,14 +1275,15 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
Token::Value op() const { return op_; }
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
private:
Token::Value op_;
@@ -1153,10 +1306,10 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-
- LOperand* object() { return inputs_[0]; }
};
@@ -1167,11 +1320,11 @@ class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
};
@@ -1182,11 +1335,12 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = object;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1198,10 +1352,11 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
};
@@ -1211,6 +1366,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1221,62 +1378,50 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
+template <class T>
+inline static bool ExternalArrayOpRequiresTemp(T* value) {
+ CHECK(value->IsLoadKeyed() || value->IsStoreKeyed());
+ Representation key_representation = value->key()->representation();
+ ElementsKind elements_kind = value->elements_kind();
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return !value->IsConstant() && key_representation.IsTagged() &&
+ (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS);
+}
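
The temp requirement comes from smi tagging: on ia32 a smi key holds index << 1, and x86 addressing can scale by 1, 2, 4 or 8 but never by 1/2, so only element sizes of 2 or more can fold the untag into the scale. For 1-byte element kinds the key has to be untagged into a scratch register first. A sketch of both cases, with hypothetical helper names:

    #include <cstdint>

    // Smi untag on ia32: the payload sits above the low tag bit.
    int32_t SmiUntag(int32_t tagged_key) { return tagged_key >> 1; }

    uint8_t LoadByteElement(const uint8_t* base, int32_t tagged_key) {
      int32_t index = SmiUntag(tagged_key);  // Needs the extra temp register.
      return base[index];                    // Scale 1; the untag cannot fold.
    }

    // For 4-byte elements the untag folds into addressing: scale 4 on the
    // untagged index equals scale 2 on the tagged key, so no temp is needed.
    int32_t LoadIntElement(const int32_t* base, int32_t tagged_key) {
      return *reinterpret_cast<const int32_t*>(
          reinterpret_cast<const uint8_t*>(base) + tagged_key * 2);
    }

    int main() {
      uint8_t bytes[] = {10, 20, 30};
      int32_t ints[] = {100, 200, 300};
      int32_t key = 2 << 1;  // smi-tagged index 2
      return (LoadByteElement(bytes, key) == 30 &&
              LoadIntElement(ints, key) == 300) ? 0 : 1;
    }
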
class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
@@ -1287,11 +1432,11 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1309,11 +1454,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = global_object;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1325,10 +1471,10 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1342,13 +1488,14 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* context() { return InputAt(0); }
- LOperand* global_object() { return InputAt(1); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(2); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1359,10 +1506,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1377,11 +1525,13 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1394,6 +1544,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1430,9 +1582,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1442,6 +1594,8 @@ class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1454,10 +1608,12 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
qml_global_ = qml_global;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
- LOperand* context() { return InputAt(0); }
bool qml_global() { return qml_global_; }
+
private:
bool qml_global_;
};
@@ -1469,9 +1625,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1494,12 +1650,12 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1514,12 +1670,12 @@ class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
LOperand* context() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1532,12 +1688,13 @@ class LCallNamed: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
virtual void PrintDataTo(StringStream* stream);
- LOperand* context() { return inputs_[0]; }
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1550,11 +1707,12 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = function;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1565,12 +1723,13 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
virtual void PrintDataTo(StringStream* stream);
- LOperand* context() { return inputs_[0]; }
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1599,13 +1758,14 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = constructor;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
virtual void PrintDataTo(StringStream* stream);
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1615,10 +1775,12 @@ class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
}
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- LOperand* context() { return inputs_[0]; }
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1630,20 +1792,51 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LUint32ToDouble(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
@@ -1651,6 +1844,9 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1663,6 +1859,9 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1678,6 +1877,9 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1691,6 +1893,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1702,6 +1906,9 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
@@ -1714,6 +1921,8 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
bool needs_check() const { return needs_check_; }
@@ -1723,22 +1932,28 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
+ LStoreNamedField(LOperand* obj,
+ LOperand* val,
+ LOperand* temp,
+ LOperand* temp_map) {
inputs_[0] = obj;
inputs_[1] = val;
temps_[0] = temp;
+ temps_[1] = temp_map;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp_map() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1754,89 +1969,44 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
+ bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
@@ -1849,15 +2019,16 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
inputs_[3] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1866,21 +2037,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1894,12 +2066,12 @@ class LStringAdd: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1911,12 +2083,12 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* context() { return inputs_[0]; }
LOperand* string() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1927,11 +2099,11 @@ class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = char_code;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
LOperand* context() { return inputs_[0]; }
LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
};
@@ -1941,10 +2113,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1968,6 +2140,9 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1979,17 +2154,21 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -2004,6 +2183,8 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
@@ -2051,6 +2232,8 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -2062,10 +2245,11 @@ class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-
- LOperand* context() { return inputs_[0]; }
};
@@ -2142,6 +2326,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2154,6 +2340,9 @@ class LTypeof: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2164,6 +2353,8 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2181,11 +2372,11 @@ class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
@@ -2305,69 +2496,19 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph),
+ num_double_slots_(0) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
+ int num_double_slots() const { return num_double_slots_; }
private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
+ int num_double_slots_;
};
@@ -2377,7 +2518,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->isolate()->zone()),
+ zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2386,16 +2527,19 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
private:
enum Status {
UNUSED,
@@ -2404,17 +2548,17 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2508,7 +2652,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2520,7 +2664,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
index c31b0c2..26d0f92 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
@@ -85,7 +85,7 @@ void MacroAssembler::RememberedSetHelper(
SaveFPRegsMode save_fp,
MacroAssembler::RememberedSetFinalAction and_then) {
Label done;
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
@@ -129,17 +129,22 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister scratch_reg,
Register result_reg) {
Label done;
- ExternalReference zero_ref = ExternalReference::address_of_zero();
- movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
+ Label conv_failure;
+ pxor(scratch_reg, scratch_reg);
+ cvtsd2si(result_reg, input_reg);
+ test(result_reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ cmp(result_reg, Immediate(0x80000000));
+ j(equal, &conv_failure, Label::kNear);
+ mov(result_reg, Immediate(0));
+ setcc(above, result_reg);
+ sub(result_reg, Immediate(1));
+ and_(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
Set(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- ExternalReference half_ref = ExternalReference::address_of_one_half();
- movdbl(scratch_reg, Operand::StaticVariable(half_ref));
- addsd(scratch_reg, input_reg);
- cvttsd2si(result_reg, Operand(scratch_reg));
- test(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
Set(result_reg, Immediate(255));
bind(&done);
}
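The rewritten clamp drops the memory-based rounding constants entirely. A minimal C++ sketch of the semantics it implements, assuming cvtsd2si's default round-to-nearest mode:

    #include <cmath>
    #include <cstdint>

    static uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;    // NaN and non-positive inputs clamp to 0
      if (value >= 255.0) return 255;  // out-of-range inputs clamp to 255
      return static_cast<uint8_t>(std::lrint(value));  // round to nearest
    }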
@@ -155,6 +160,24 @@ void MacroAssembler::ClampUint8(Register reg) {
}
+static double kUint32Bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+
+
+void MacroAssembler::LoadUint32(XMMRegister dst,
+ Register src,
+ XMMRegister scratch) {
+ Label done;
+ cmp(src, Immediate(0));
+ movdbl(scratch,
+ Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE));
+ cvtsi2sd(dst, src);
+ j(not_sign, &done, Label::kNear);
+ addsd(dst, scratch);
+ bind(&done);
+}
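A C++ sketch of what LoadUint32 computes: the source is converted as a signed 32-bit integer, and the 2^32 bias is added back whenever the sign bit was set (illustrative, assuming IEEE doubles):

    double LoadUint32(uint32_t src) {
      double dst = static_cast<double>(static_cast<int32_t>(src));  // cvtsi2sd
      if (static_cast<int32_t>(src) < 0) dst += 4294967296.0;       // kUint32Bias
      return dst;
    }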
+
+
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -237,6 +260,66 @@ void MacroAssembler::RecordWriteField(
}
+void MacroAssembler::RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2,
+ SaveFPRegsMode save_fp) {
+ Label done;
+
+ Register address = scratch1;
+ Register value = scratch2;
+ if (emit_debug_code()) {
+ Label ok;
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ test_b(address, (1 << kPointerSizeLog2) - 1);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ AssertNotSmi(object);
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ ASSERT(!isolate()->heap()->InNewSpace(*map));
+ CheckPageFlagForMap(map,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ // Delay the initialization of |address| and |value| for the stub until it's
+ // known that they will be needed. Up until this point their values are not
+ // needed, since they are embedded in the operands of the instructions that
+ // use them.
+ lea(address, FieldOperand(object, HeapObject::kMapOffset));
+ mov(value, Immediate(map));
+ RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
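The fast path reduces to a single static memory test; a hedged C++ sketch of the condition (the flags() accessor is illustrative, not the real API):

    // Skip the barrier unless the map's page is marked as interesting to the
    // incremental marker; maps never live in new space, so one check suffices.
    static bool MapWriteNeedsBarrier(Map* map) {
      Page* page = Page::FromAddress(map->address());
      return (page->flags() & MemoryChunk::kPointersToHereAreInterestingMask) != 0;
    }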
+
+
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value,
@@ -246,16 +329,14 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -382,10 +463,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastElementValue);
+ Map::kMaximumBitField2FastHoleyElementValue);
j(above, fail, distance);
}
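The single unsigned compare is sound because the reordered ElementsKind enum keeps all fast kinds contiguous from zero; a sketch of the layout the STATIC_ASSERTs above imply (placement of the remaining kinds is assumed):

    enum ElementsKind {
      FAST_SMI_ELEMENTS       = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS           = 2,
      FAST_HOLEY_ELEMENTS     = 3,
      // non-fast kinds follow, so kind <= FAST_HOLEY_ELEMENTS means "fast"
    };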
@@ -393,23 +476,26 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastSmiOnlyElementValue);
+ Map::kMaximumBitField2FastHoleySmiElementValue);
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastElementValue);
+ Map::kMaximumBitField2FastHoleyElementValue);
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastSmiOnlyElementValue);
+ Map::kMaximumBitField2FastHoleySmiElementValue);
j(above, fail, distance);
}
@@ -493,24 +579,18 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_fast_element_map));
- }
-
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_double_map));
+ ElementsKind kind = map->elements_kind();
+ if (IsFastElementsKind(kind)) {
+ bool packed = IsFastPackedElementsKind(kind);
+ Map* current_map = *map;
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ current_map = current_map->LookupElementsTransitionMap(kind);
+ if (!current_map) break;
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(current_map));
+ }
}
}
}
@@ -592,36 +672,44 @@ void MacroAssembler::FCmp() {
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- JumpIfSmi(object, &ok);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfSmi(object, &ok);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, "Operand not a number");
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(equal, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(equal, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- mov(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi and not a string");
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- test(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi");
+ }
}
@@ -921,23 +1009,24 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
cmp(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
push(scratch);
- // Read the first word and compare to global_context_map.
+ // Read the first word and compare to native_context_map.
mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ cmp(scratch, isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(scratch);
}
// Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
@@ -948,18 +1037,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check that the security token in the calling global object is
// compatible with the security token in the receiving global
// object.
- mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ mov(holder_reg,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
cmp(holder_reg, isolate()->factory()->null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
push(holder_reg);
- // Read the first word and compare to global_context_map(),
+ // Read the first word and compare to native_context_map(),
mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ cmp(holder_reg, isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg);
}
@@ -1646,7 +1736,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -1861,16 +1951,53 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = eax;
+ Register map = ecx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ cmp(map, isolate()->factory()->heap_number_map());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->undefined_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->true_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->false_value());
+ j(equal, &ok, Label::kNear);
+
+ cmp(return_value, isolate()->factory()->null_value());
+ j(equal, &ok, Label::kNear);
+
+ Abort("API call returned invalid object");
+
+ bind(&ok);
+#endif
+
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
bind(&empty_handle);
// It was zero; the result is undefined.
mov(eax, isolate()->factory()->undefined_value());
jmp(&prologue);
+ bind(&promote_scheduled_exception);
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
ExternalReference::delete_handle_scope_extensions(isolate());
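The ENABLE_EXTRA_CHECKS block amounts to a whitelist over API return values; a hedged C++ sketch of the predicate it enforces (helper name and Is* tests are illustrative):

    static bool IsValidApiReturnValue(Object* value, Factory* factory) {
      return value->IsSmi() || value->IsString() || value->IsSpecObject() ||
             value->IsHeapNumber() ||
             value == *factory->undefined_value() ||
             value == *factory->true_value() ||
             value == *factory->false_value() ||
             value == *factory->null_value();
    }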
@@ -2108,7 +2235,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
mov(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
@@ -2157,31 +2284,42 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+ mov(scratch, Operand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ cmp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ mov(map_in_out, FieldOperand(scratch, offset));
}
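The cached Array maps now live in a FixedArray stored at JS_ARRAY_MAPS_INDEX in the native context, indexed by ElementsKind, which is why the per-kind context slots give way to the manual offset arithmetic (FixedArrayBase::kHeaderSize + kind * kPointerSize) above.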
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
+ Register function_in, Register scratch,
+ Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
+ ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ kind,
+ map_out,
+ scratch,
+ &done);
+ } else if (can_have_holes) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@@ -2192,10 +2330,11 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ mov(function,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2446,12 +2585,13 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- mov(descriptors,
- FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
- mov(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&not_smi);
+ mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
@@ -2475,7 +2615,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
}
and_(scratch,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
+ cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
j(not_equal, failure);
}
@@ -2608,6 +2748,28 @@ void MacroAssembler::CheckPageFlag(
}
+void MacroAssembler::CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ Page* page = Page::FromAddress(map->address());
+ ExternalReference reference(ExternalReference::page_flags(page));
+ // The inlined static address check of the page's flags relies
+ // on maps never being compacted.
+ ASSERT(!isolate()->heap()->mark_compact_collector()->
+ IsOnEvacuationCandidate(*map));
+ if (mask < (1 << kBitsPerByte)) {
+ test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+ } else {
+ test(Operand::StaticVariable(reference), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
@@ -2692,7 +2854,7 @@ void MacroAssembler::EnsureNotWhite(
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
@@ -2744,7 +2906,7 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
+ ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
@@ -2767,7 +2929,7 @@ void MacroAssembler::EnsureNotWhite(
and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
length);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
Check(less_equal, "Live Bytes Count overflow chunk size");
@@ -2777,40 +2939,43 @@ void MacroAssembler::EnsureNotWhite(
}
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ mov(dst, FieldOperand(map, Map::kBitField3Offset));
+ and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
+}
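Because EnumLengthBits::kShift is zero (see the STATIC_ASSERT), masking the smi-tagged bit_field3 directly yields the enum length, itself still in smi form:

    dst = bit_field3 & Smi::FromInt(EnumLengthBits::kMask)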
+
+
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Label next;
+ Label next, start;
mov(ecx, eax);
- bind(&next);
-
- // Check that there are no elements. Register ecx contains the
- // current JS object we've reached through the prototype chain.
- cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- j(not_equal, call_runtime);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(edx, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (edx). This is the case if the next enumeration
- // index field does not contain a smi.
- mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(edx, call_runtime);
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmp(ecx, eax);
- j(equal, &check_prototype, Label::kNear);
- mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmp(edx, isolate()->factory()->empty_fixed_array());
+ EnumLength(edx, ebx);
+ cmp(edx, Immediate(Smi::FromInt(0)));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register ecx contains the current JS
+ // object we've reached through the prototype chain.
+ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ cmp(ecx, isolate()->factory()->empty_fixed_array());
j(not_equal, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
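The restructured walk reads more easily as pseudo-code (a sketch, not V8 API):

    obj = receiver;
    if (EnumLength(obj->map()) == kInvalidEnumCache) goto runtime;
    for (;;) {
      if (obj != receiver && EnumLength(obj->map()) != 0) goto runtime;
      if (obj->elements() != empty_fixed_array) goto runtime;
      obj = obj->map()->prototype();
      if (obj == null) break;
    }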
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
index 1cc9142..b91cfcd 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
@@ -90,6 +90,13 @@ class MacroAssembler: public Assembler {
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckPageFlagForMap(
+ Handle<Map> map,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -194,6 +201,16 @@ class MacroAssembler: public Assembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
+ // For the page containing |object|, mark the region covering the object's map
+ // dirty. |object| is the object being stored into, |map| is the Map object
+ // that was stored.
+ void RecordWriteForMap(
+ Register object,
+ Handle<Map> map,
+ Register scratch1,
+ Register scratch2,
+ SaveFPRegsMode save_fp);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -222,8 +239,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -235,7 +252,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out);
+ Register map_out,
+ bool can_have_holes);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -357,9 +375,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
+ void CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
@@ -449,6 +467,8 @@ class MacroAssembler: public Assembler {
j(not_carry, is_smi);
}
+ void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
Label* smi_label,
@@ -472,20 +492,29 @@ class MacroAssembler: public Assembler {
}
void LoadInstanceDescriptors(Register map, Register descriptors);
-
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
+ sar(reg, shift);
+ and_(reg, Immediate(mask));
+ }
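For example (illustrative): a field with kShift == 3 and a four-bit mask decodes as reg = (reg >> 3) & (0xF << kSmiTagSize); the extra kSmiTagSize shift baked into the mask is what keeps the decoded value smi-tagged.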
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
// ---------------------------------------------------------------------------
// Exception handling
@@ -670,7 +699,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
@@ -943,11 +972,11 @@ inline Operand ContextOperand(Register context, int index) {
inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_INDEX);
+ return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}
static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(esi, Context::QML_GLOBAL_INDEX);
+ return ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX);
}
// Generates an Operand for saving parameters after PrepareCallApiFunction.
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 0029f33..622dc42 100644
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,28 +42,30 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
- * - edx : current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods.
- * - edi : current position in input, as negative offset from end of string.
+ * - edx : Current character. Must be loaded using LoadCurrentCharacter
+ * before using any of the dispatch methods. Temporarily stores the
+ * index of capture start after a matching pass for a global regexp.
+ * - edi : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - esi : end of input (points to byte after last character in input).
- * - ebp : frame pointer. Used to access arguments, local variables and
+ * - ebp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - esp : points to tip of C stack.
- * - ecx : points to tip of backtrack stack
+ * - esp : Points to tip of C stack.
+ * - ecx : Points to tip of backtrack stack
*
* The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
- * - Isolate* isolate (Address of the current isolate)
+ * - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
- * - stack_area_base (High end of the memory area to use as
+ * - stack_area_base (high end of the memory area to use as
* backtracking stack)
+ * - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
+ * - end of input (address of end of string)
+ * - start of input (address of first character in string)
* - start index (character index of start)
* - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
@@ -72,9 +74,10 @@ namespace internal {
* - backup of caller esi
* - backup of caller edi
* - backup of caller ebx
+ * - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
- * - register 0 ebp[-4] (Only positions must be stored in the first
+ * - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
*
@@ -98,8 +101,10 @@ namespace internal {
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -311,6 +316,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// uncaptured. In either case succeed immediately.
__ j(equal, &fallthrough);
+ // Check that there are sufficient characters left in the input.
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+
if (mode_ == ASCII) {
Label success;
Label fail;
@@ -482,15 +492,6 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
}
-void RegExpMacroAssemblerIA32::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ mov(eax, register_location(reg1));
- __ cmp(eax, register_location(reg2));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
__ cmp(current_character(), c);
@@ -706,13 +707,16 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
- ASSERT(FAILURE == 0); // Return value for failure is zero.
- __ Set(eax, Immediate(0));
+ STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
+ if (!global()) {
+ __ Set(eax, Immediate(FAILURE));
+ }
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
+ Label return_eax;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -731,6 +735,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
+ __ push(Immediate(0)); // Number of successful matches in a global regexp.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
@@ -750,13 +755,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
- __ jmp(&exit_label_);
+ __ jmp(&return_eax);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
__ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &exit_label_);
+ __ j(not_zero, &return_eax);
__ bind(&stack_ok);
// Load start index for later use.
@@ -783,19 +788,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, +0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- }
- // Ensure that we have written to each stack page, in order. Skipping a page
+#ifdef WIN32
+ // Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
@@ -804,20 +798,45 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
+#endif // WIN32
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ if (num_saved_registers_ > 8) {
+ __ mov(ecx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ mov(Operand(ebp, ecx, times_1, 0), eax);
+ __ sub(ecx, Immediate(kPointerSize));
+ __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+ __ j(greater, &init_loop);
+ } else { // Unroll the loop.
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ mov(register_location(i), eax);
+ }
+ }
+ }
// Initialize backtrack stack pointer.
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
- // Load previous char as initial value of current-character.
- Label at_start;
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(equal, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ mov(current_character(), '\n');
- __ jmp(&start_label_);
+ __ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@@ -836,6 +855,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in edx for the zero-length check later.
+ __ mov(edx, eax);
+ }
// Convert to index from start of string, not end.
__ add(eax, ecx);
if (mode_ == UC16) {
@@ -844,10 +867,57 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebx, i * kPointerSize), eax);
}
}
- __ mov(eax, Immediate(SUCCESS));
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ // Increment success counter.
+ __ inc(Operand(ebp, kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ mov(ecx, Operand(ebp, kNumOutputRegisters));
+ __ sub(ecx, Immediate(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmp(ecx, Immediate(num_saved_registers_));
+ __ j(less, &exit_label_);
+
+ __ mov(Operand(ebp, kNumOutputRegisters), ecx);
+ // Advance the location for output.
+ __ add(Operand(ebp, kRegisterOutput),
+ Immediate(num_saved_registers_ * kPointerSize));
+
+ // Prepare eax to initialize registers with its value in the next run.
+ __ mov(eax, Operand(ebp, kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // edx: capture start index
+ __ cmp(edi, edx);
+ // Not a zero-length match, restart.
+ __ j(not_equal, &load_char_start_regexp);
+ // edi (offset from the end) is zero if we already reached the end.
+ __ test(edi, edi);
+ __ j(zero, &exit_label_, Label::kNear);
+ // Advance current position after a zero-length match.
+ if (mode_ == UC16) {
+ __ add(edi, Immediate(2));
+ } else {
+ __ inc(edi);
+ }
+ }
+
+ __ jmp(&load_char_start_regexp);
+ } else {
+ __ mov(eax, Immediate(SUCCESS));
+ }
}
- // Exit and return eax
+
__ bind(&exit_label_);
+ if (global()) {
+ // Return the number of successful captures.
+ __ mov(eax, Operand(ebp, kSuccessfulCaptures));
+ }
+
+ __ bind(&return_eax);
// Skip esp past regexp registers.
__ lea(esp, Operand(ebp, kBackup_ebx));
// Restore callee-save registers.
@@ -877,7 +947,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ j(not_zero, &exit_label_);
+ __ j(not_zero, &return_eax);
__ pop(edi);
__ pop(backtrack_stackpointer());
@@ -924,7 +994,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(eax, EXCEPTION);
- __ jmp(&exit_label_);
+ __ jmp(&return_eax);
}
CodeDesc code_desc;
@@ -1043,8 +1113,9 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
}
-void RegExpMacroAssemblerIA32::Succeed() {
+bool RegExpMacroAssemblerIA32::Succeed() {
__ jmp(&success_label_);
+ return global();
}
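
Taken together, the changes in this file make the generated code loop in
place for global regexps: each successful match bumps a success counter,
advances the output area, and restarts at the match end, stepping one
character past zero-length matches; eax finally carries the match count
instead of SUCCESS/FAILURE. A minimal C++ sketch of that control flow, with
MatchOnce() as a dummy stand-in for one pass of the compiled code:

    // Illustrative sketch of the new driver loop, not V8's actual API.
    // MatchOnce() stands in for one matching pass of the generated code;
    // it fills captures[0] = match start and captures[1] = match end.
    static bool MatchOnce(const char* subject, int subject_length, int pos,
                          int* captures) {
      (void)subject; (void)subject_length; (void)pos; (void)captures;
      return false;  // dummy body so the sketch compiles
    }

    int MatchGlobal(const char* subject, int subject_length,
                    int* output, int output_size, int num_captures) {
      int successful_matches = 0;       // mirrors kSuccessfulCaptures
      int pos = 0;
      while (output_size >= num_captures) {  // room for another set?
        if (!MatchOnce(subject, subject_length, pos, output))
          break;                        // no further match: keep results
        successful_matches++;           // inc(Operand(ebp, kSuccessfulCaptures))
        int start = output[0], end = output[1];
        output += num_captures;         // advance kRegisterOutput
        output_size -= num_captures;    // shrink kNumOutputRegisters
        if (start == end) {             // zero-length match
          if (end == subject_length) break;  // already at end of input
          pos = end + 1;                // advance one character, retry
        } else {
          pos = end;                    // restart right after the match
        }
      }
      return successful_matches;        // the stub's new eax return value
    }
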
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
index 78cd069..7aea385 100644
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,17 +34,10 @@
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32() { }
- virtual ~RegExpMacroAssemblerIA32() { }
-};
-
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerIA32();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -69,7 +62,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
@@ -111,7 +103,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -135,7 +127,11 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+  // For a global regular expression, we have room to store at least one
+  // set of capture results. For a non-global regexp, this value is
+  // ignored.
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
@@ -144,7 +140,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+ static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.h b/src/3rdparty/v8/src/ia32/simulator-ia32.h
index 13ddf35..478d4ce 100644
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.h
+++ b/src/3rdparty/v8/src/ia32/simulator-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,12 +40,12 @@ namespace internal {
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int, Isolate*);
+ const byte*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect nine int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
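
Every CALL_GENERATED_REGEXP_CODE site now supplies one more argument, the
output-array capacity, between the capture array and the stack base. A
generic sketch of the pattern (hypothetical names, not the V8 entry point):

    // Illustrative sketch: one extra parameter threaded through a
    // function-pointer typedef and its forwarding macro.
    typedef int (*matcher_sketch)(const char* input, int start_index,
                                  int* capture_array, int output_size,
                                  void* stack_area_base);

    #define CALL_MATCHER_SKETCH(entry, p0, p1, p2, p3, p4) \
      (reinterpret_cast<matcher_sketch>(entry)((p0), (p1), (p2), (p3), (p4)))
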
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
index e148e2f..11efb72 100644
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
@@ -276,12 +276,12 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
// Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
- masm->isolate()->global());
+ __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
+ masm->isolate()->global_object());
__ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->global_context()->get(index)));
+ JSFunction::cast(masm->isolate()->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
@@ -745,10 +745,22 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
@@ -757,7 +769,32 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+  // We need an extra register: save name_reg so it can serve as a scratch.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
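
The new guard consults a LookupResult before any map checks: a found
property that is read-only or not cacheable sends the store straight to the
miss label, since only the runtime can choose between silently ignoring the
write (sloppy mode) and throwing (strict mode). The rule restated as plain
C++ (Lookup is a hypothetical stand-in for LookupResult):

    // Illustrative sketch of the bail-out rule added above.
    struct Lookup {
      bool found;
      bool read_only;
      bool cacheable;
    };

    // False means "jump to the miss label": only the runtime knows whether
    // a blocked store should throw (strict) or be ignored (sloppy).
    inline bool CanStoreInline(const Lookup& lookup) {
      return !(lookup.found && (lookup.read_only || !lookup.cacheable));
    }
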
@@ -768,11 +805,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
+ __ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
__ push(eax);
- __ push(scratch);
+ __ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -782,10 +819,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(transition));
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -804,19 +850,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch, offset), eax);
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, offset), eax);
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, eax);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -1006,6 +1052,58 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+ Register dictionary = scratch1;
+ bool must_preserve_dictionary_reg = receiver.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &pop_and_miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(scratch3,
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3, callback);
+ __ j(not_equal, miss);
+}
+
+
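
The helper above only keeps the load on the fast path when the dictionary
probe lands on an entry whose value is exactly the AccessorInfo the stub was
compiled against; anything else jumps to the miss label. A sketch of that
check in plain C++, with a linear scan standing in for the emitted hash
probe and illustrative types throughout:

    #include <string>
    #include <vector>

    struct Entry { std::string key; const void* value; };

    bool ValueIsExpectedCallback(const std::vector<Entry>& dictionary,
                                 const std::string& name,
                                 const void* expected_callback) {
      for (const Entry& e : dictionary) {   // stands in for the hash probe
        if (e.key == name) return e.value == expected_callback;
      }
      return false;                         // probe failed: miss
    }
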
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1013,6 +1111,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1023,6 +1122,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch1, scratch2, scratch3, callback, name, miss);
+ }
+
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3.is(reg));
__ pop(scratch3); // Get return address to place it below.
@@ -1111,12 +1215,13 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -1195,7 +1300,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), eax, holder_reg,
@@ -1368,7 +1473,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1462,16 +1567,31 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiOnlyElements(ebx, &call_builtin);
+ __ CheckFastSmiElements(ebx, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ Label try_holey_map;
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
+ &try_holey_map);
+
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+ // Restore edi.
+ __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+ __ jmp(&fast_object);
+
+ __ bind(&try_holey_map);
+ __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ ebx,
+ edi,
&call_builtin);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@@ -1900,7 +2020,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2030,7 +2150,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2135,7 +2255,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2382,7 +2502,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2443,7 +2563,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2460,8 +2580,13 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
Label miss;
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
-
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ edx, ecx, ebx, edi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
__ mov(ecx, Immediate(name)); // restore name
@@ -2469,14 +2594,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2484,19 +2612,14 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- esp[0] : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed, preserving the value register.
+ __ push(eax);
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
+ __ pop(eax); // restore value
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(object->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ pop(ebx); // remove the return address
__ push(edx); // receiver
@@ -2512,11 +2635,89 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Handle store cache miss.
__ bind(&miss);
+ __ pop(eax);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(eax);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(edx);
+ __ push(eax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed, preserving the name register.
+ __ push(ecx);
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss);
+ __ pop(ecx);
+
+ GenerateStoreViaSetter(masm(), setter);
+
+ __ bind(&miss);
+ __ pop(ecx);
Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
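
Both setter paths above take care to return the assigned value rather than
whatever the JavaScript setter returns; that is what the push/pop of eax
around InvokeFunction implements. The same invariant in ordinary C++, with
std::function standing in for the compiled setter:

    #include <functional>

    // Illustrative sketch: an assignment expression evaluates to the
    // assigned value, so the setter's own return value is discarded.
    using Setter = std::function<void(int /*receiver*/, int /*value*/)>;

    int StoreViaSetter(int receiver, int value, const Setter& setter) {
      int saved = value;        // __ push(eax)
      setter(receiver, value);  // InvokeFunction(setter, ...)
      return saved;             // __ pop(eax); __ ret(0)
    }
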
@@ -2562,7 +2763,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2610,7 +2811,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2634,7 +2835,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
- GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ edx, ecx, ebx, edi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2643,7 +2850,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -2666,7 +2875,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -2701,7 +2910,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -2741,7 +2950,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2761,7 +2970,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2777,13 +2986,76 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, callback,
- name, &miss);
+ GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, no_reg,
+ callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(edx);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(edx, &miss);
+ CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
+
+ GenerateLoadViaGetter(masm(), getter);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2803,7 +3075,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2829,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2877,7 +3149,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2906,7 +3178,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2929,15 +3201,15 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(ecx, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, callback,
- name, &miss);
+ GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, no_reg,
+ callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2967,7 +3239,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2998,7 +3270,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -3024,7 +3296,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3050,7 +3322,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3076,7 +3348,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3096,7 +3368,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3123,7 +3395,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3149,6 +3421,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#endif
// Load the initial map and verify that it is in fact a map.
+ // edi: constructor
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ JumpIfSmi(ebx, &generic_stub_call);
@@ -3157,19 +3430,23 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#ifdef DEBUG
// Cannot construct functions this way.
- // edi: constructor
// ebx: initial map
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Assert(not_equal, "Function constructed by construct stub.");
+ __ Check(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject on the heap by moving the new space allocation
// top forward.
- // edi: constructor
// ebx: initial map
+ ASSERT(function->has_initial_map());
+ int instance_size = function->initial_map()->instance_size();
+#ifdef DEBUG
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+ __ cmp(ecx, Immediate(instance_size));
+ __ Check(equal, "Instance size of initial map changed.");
+#endif
+ __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
&generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
@@ -3229,7 +3506,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3818,7 +4094,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
}
@@ -3843,7 +4119,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@@ -3851,7 +4127,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
times_half_pointer_size,
FixedArray::kHeaderSize), eax);
} else {
- ASSERT(elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastObjectElementsKind(elements_kind));
// Do the store and update the write barrier.
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
diff --git a/src/3rdparty/v8/src/ic-inl.h b/src/3rdparty/v8/src/ic-inl.h
index 6a86921..49b6ef9 100644
--- a/src/3rdparty/v8/src/ic-inl.h
+++ b/src/3rdparty/v8/src/ic-inl.h
@@ -40,7 +40,7 @@ namespace internal {
Address IC::address() const {
// Get the address of the call.
- Address result = pc() - Assembler::kCallTargetAddressOffset;
+ Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = Isolate::Current()->debug();
@@ -79,6 +79,7 @@ Code* IC::GetTargetAtAddress(Address address) {
void IC::SetTargetAtAddress(Address address, Code* target) {
ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+ Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address);
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
@@ -90,8 +91,11 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
- target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
- target);
+ if (heap->gc_state() == Heap::MARK_COMPACT) {
+ heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
+ } else {
+ heap->incremental_marking()->RecordCodeTargetPatch(address, target);
+ }
PostPatching(address, target, old_target);
}
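
SetTargetAtAddress now records the patched code target with whichever
collector is active: the mark-compact collector during a full GC, the
incremental marker otherwise. A sketch of the dispatch with stand-in types
(the real code branches on heap->gc_state()):

    // Illustrative sketch, not V8's types.
    enum GcState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

    struct Recorder {
      virtual ~Recorder() {}
      virtual void RecordCodeTargetPatch(void* address, void* target) = 0;
    };

    void RecordPatch(GcState state, Recorder* mark_compact,
                     Recorder* incremental, void* address, void* target) {
      if (state == MARK_COMPACT) {
        // A full collection is in progress: the mark-compact collector
        // must see the new target so the patched pointer gets visited.
        mark_compact->RecordCodeTargetPatch(address, target);
      } else {
        incremental->RecordCodeTargetPatch(address, target);
      }
    }
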
diff --git a/src/3rdparty/v8/src/ic.cc b/src/3rdparty/v8/src/ic.cc
index be5f752..fe31ef1 100644
--- a/src/3rdparty/v8/src/ic.cc
+++ b/src/3rdparty/v8/src/ic.cc
@@ -158,7 +158,7 @@ Address IC::OriginalCodeAddress() const {
// Get the address of the call site in the active code. This is the
// place where the call to DebugBreakXXX is and where the IC
// normally would be.
- Address addr = pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = Assembler::target_address_from_return_address(pc());
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
@@ -320,13 +320,17 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
target->ic_state());
// Not all Code objects have TypeFeedbackInfo.
- if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) {
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
- info->set_ic_with_type_info_count(
- info->ic_with_type_info_count() + delta);
+ info->change_ic_with_type_info_count(delta);
}
}
+ if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info =
+ TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ }
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
Isolate::Current()->runtime_profiler()->NotifyICChanged();
@@ -435,9 +439,7 @@ static void LookupForRead(Handle<Object> object,
// Besides normal conditions (property not found or it's not
// an interceptor), bail out if lookup is not cacheable: we won't
// be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsFound()
- || (lookup->type() != INTERCEPTOR)
- || !lookup->IsCacheable()) {
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
return;
}
@@ -447,14 +449,14 @@ static void LookupForRead(Handle<Object> object,
}
holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsProperty()) {
- ASSERT(lookup->type() != INTERCEPTOR);
+ if (lookup->IsFound()) {
+ ASSERT(!lookup->IsInterceptor());
return;
}
Handle<Object> proto(holder->GetPrototype());
if (proto->IsNull()) {
- lookup->NotFound();
+ ASSERT(!lookup->IsFound());
return;
}
@@ -535,7 +537,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
// If the object does not have the requested property, check which
// exception we need to throw.
return IsContextual(object)
@@ -554,7 +556,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
Object::GetProperty(object, object, &lookup, name, &attr);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
- if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
+ if (lookup.IsInterceptor() && attr == ABSENT) {
// If the object does not have the requested property, check which
// exception we need to throw.
return IsContextual(object)
@@ -902,7 +904,7 @@ MaybeObject* LoadIC::Load(State state,
LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -915,8 +917,7 @@ MaybeObject* LoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsFound() &&
- (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
+ if (lookup.IsInterceptor() || lookup.IsHandler()) {
// Get the property.
Handle<Object> result =
Object::GetProperty(object, object, &lookup, name, &attr);
@@ -988,13 +989,29 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
break;
case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- code = isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, callback);
+#ifdef _WIN32_WCE
+      // Disable this optimization on Windows CE, where the calling
+      // convention differs.
+ return;
+#endif
+ Handle<Object> callback(lookup->GetCallbackObject());
+ if (callback->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->getter()) == 0) return;
+ if (!info->IsCompatibleReceiver(*receiver)) return;
+ code = isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, info);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter());
+ if (!getter->IsJSFunction()) return;
+ if (holder->IsGlobalObject()) return;
+ if (!holder->HasFastProperties()) return;
+ code = isolate()->stub_cache()->ComputeLoadViaGetter(
+ name, receiver, holder, Handle<JSFunction>::cast(getter));
+ } else {
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ return;
+ }
break;
}
case INTERCEPTOR:
@@ -1156,7 +1173,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
LookupForRead(object, name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty() && IsContextual(object)) {
+ if (!lookup.IsFound() && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
@@ -1165,7 +1182,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
PropertyAttributes attr;
- if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsInterceptor()) {
// Get the property.
Handle<Object> result =
Object::GetProperty(object, object, &lookup, name, &attr);
@@ -1256,6 +1273,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
Handle<AccessorInfo> callback =
Handle<AccessorInfo>::cast(callback_object);
if (v8::ToCData<Address>(callback->getter()) == 0) return;
+ if (!callback->IsCompatibleReceiver(*receiver)) return;
code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
name, receiver, holder, callback);
break;
@@ -1288,15 +1306,16 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result.
- if (!lookup->IsFound() || lookup->type() == NULL_DESCRIPTOR) return false;
+ if (!lookup->IsFound()) return false;
// Bail out if inline caching is not allowed.
if (!lookup->IsCacheable()) return false;
// If the property is read-only, we leave the IC in its current state.
- if (lookup->IsReadOnly()) return false;
-
- return true;
+ if (lookup->IsTransition()) {
+ return !lookup->GetTransitionDetails().IsReadOnly();
+ }
+ return !lookup->IsReadOnly();
}
@@ -1304,11 +1323,16 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
LookupResult* lookup) {
receiver->LocalLookup(*name, lookup);
+ if (!lookup->IsFound()) {
+ receiver->map()->LookupTransition(*receiver, *name, lookup);
+ }
if (!StoreICableLookup(lookup)) {
- return false;
+ // 2nd chance: There can be accessors somewhere in the prototype chain.
+ receiver->Lookup(*name, lookup);
+ return lookup->IsPropertyCallbacks() && StoreICableLookup(lookup);
}
- if (lookup->type() == INTERCEPTOR &&
+ if (lookup->IsInterceptor() &&
receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
receiver->LocalLookupRealNamedProperty(*name, lookup);
return StoreICableLookup(lookup);
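
The patched LookupForWrite tries three lookups in order: a local one, a
map-transition lookup when nothing local exists, and, if neither is IC-able,
a full prototype-chain lookup that is accepted only for writable accessor
properties. The decision order as a sketch (interceptor handling elided;
Lookup is a stand-in for LookupResult):

    struct Lookup {
      bool found = false;
      bool cacheable = false;
      bool read_only = false;     // for transitions: the transition's details
      bool is_callbacks = false;  // accessor property (getter/setter)
    };

    inline bool StoreICable(const Lookup& l) {
      return l.found && l.cacheable && !l.read_only;
    }

    bool LookupForWrite(const Lookup& local, const Lookup& transition,
                        const Lookup& full_chain) {
      Lookup result = local.found ? local : transition;  // 1) then 2)
      if (StoreICable(result)) return true;
      // 3) 2nd chance: writable accessors on the prototype chain.
      return full_chain.is_callbacks && StoreICable(full_chain);
    }
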
@@ -1357,6 +1381,11 @@ MaybeObject* StoreIC::Store(State state,
return *value;
}
+ // Observed objects are always modified through the runtime.
+ if (FLAG_harmony_observation && receiver->map()->is_observed()) {
+ return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ }
+
// Use specialized code for setting the length of arrays with fast
// properties. Slow properties might indicate redefinition of the
// length property.
@@ -1375,12 +1404,13 @@ MaybeObject* StoreIC::Store(State state,
}
// Lookup the property locally in the receiver.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ if (!receiver->IsJSGlobalProxy()) {
LookupResult lookup(isolate());
if (LookupForWrite(receiver, name, &lookup)) {
- // Generate a stub for this store.
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ if (FLAG_use_ic) { // Generate a stub for this store.
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ }
} else {
// Strict mode doesn't allow setting non-existent global property
// or an assignment to a read only property.
@@ -1408,7 +1438,11 @@ MaybeObject* StoreIC::Store(State state,
}
// Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ return receiver->SetProperty(*name,
+ *value,
+ NONE,
+ strict_mode,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -1420,10 +1454,10 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) {
ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(StoreICableLookup(lookup));
+ ASSERT(lookup->IsFound());
+
// These are not cacheable, so we never see such LookupResults here.
- ASSERT(lookup->type() != HANDLER);
- // We get only called for properties or transitions, see StoreICableLookup.
- ASSERT(lookup->type() != NULL_DESCRIPTOR);
+ ASSERT(!lookup->IsHandler());
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1433,6 +1467,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// Compute the code stub for this store; used for rewriting to
// monomorphic state and making sure that the code stub is in the
// stub cache.
+ Handle<JSObject> holder(lookup->holder());
Handle<Code> code;
switch (type) {
case FIELD:
@@ -1442,14 +1477,6 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Map>::null(),
strict_mode);
break;
- case MAP_TRANSITION: {
- if (lookup->GetAttributes() != NONE) return;
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
- code = isolate()->stub_cache()->ComputeStoreField(
- name, receiver, index, transition, strict_mode);
- break;
- }
case NORMAL:
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
@@ -1460,18 +1487,32 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
code = isolate()->stub_cache()->ComputeStoreGlobal(
name, global, cell, strict_mode);
} else {
- if (lookup->holder() != *receiver) return;
+ if (!holder.is_identical_to(receiver)) return;
code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
break;
case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->setter()) == 0) return;
- code = isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, callback, strict_mode);
+ Handle<Object> callback(lookup->GetCallbackObject());
+ if (callback->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->setter()) == 0) return;
+ if (!holder->HasFastProperties()) return;
+ if (!info->IsCompatibleReceiver(*receiver)) return;
+ code = isolate()->stub_cache()->ComputeStoreCallback(
+ name, receiver, holder, info, strict_mode);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter());
+ if (!setter->IsJSFunction()) return;
+ if (holder->IsGlobalObject()) return;
+ if (!holder->HasFastProperties()) return;
+ code = isolate()->stub_cache()->ComputeStoreViaSetter(
+ name, receiver, holder, Handle<JSFunction>::cast(setter),
+ strict_mode);
+ } else {
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ return;
+ }
break;
}
case INTERCEPTOR:
@@ -1480,11 +1521,24 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
name, receiver, strict_mode);
break;
case CONSTANT_FUNCTION:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
return;
+ case TRANSITION: {
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() != FIELD || details.attributes() != NONE) return;
+
+ int field_index = target_descriptors->GetFieldIndex(descriptor);
+ code = isolate()->stub_cache()->ComputeStoreField(
+ name, receiver, field_index, transition, strict_mode);
+
+ break;
+ }
+ case NONEXISTENT:
case HANDLER:
- case NULL_DESCRIPTOR:
UNREACHABLE();
return;
}
@@ -1557,44 +1611,49 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != NORMAL) {
+ if (target()->type() != Code::NORMAL) {
TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
return generic_stub;
}
bool monomorphic = false;
+ bool is_transition_stub = IsTransitionStubKind(stub_kind);
+ Handle<Map> receiver_map(receiver->map());
+ Handle<Map> monomorphic_map = receiver_map;
MapHandleList target_receiver_maps;
- if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
+ // yet will do so and stay there.
+ monomorphic = true;
+ } else {
GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- }
- if (!IsTransitionStubKind(stub_kind)) {
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- monomorphic = true;
- } else {
- if (ic_state == MONOMORPHIC) {
- // The first time a receiver is seen that is a transitioned version of
- // the previous monomorphic receiver type, assume the new ElementsKind
- // is the monomorphic type. This benefits global arrays that only
- // transition once, and all call sites accessing them are faster if they
- // remain monomorphic. If this optimistic assumption is not true, the IC
- // will miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- monomorphic = IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind());
- }
+ if (ic_state == MONOMORPHIC && (is_transition_stub || stub_kind == LOAD)) {
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ monomorphic = IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind());
}
}
if (monomorphic) {
+ if (is_transition_stub) {
+ monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
+ ASSERT(*monomorphic_map != *receiver_map);
+ stub_kind = GetNoTransitionStubKind(stub_kind);
+ }
return ComputeMonomorphicStub(
- receiver, stub_kind, strict_mode, generic_stub);
+ monomorphic_map, stub_kind, strict_mode, generic_stub);
}
ASSERT(target() != *generic_stub);
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
- Handle<Map> receiver_map(receiver->map());
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
if (IsTransitionStubKind(stub_kind)) {
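
A minimal sketch of the policy described in the comment above, assuming a simplified three-kind lattice in place of V8's real ElementsKind enum; MiniKeyedIC and its names are hypothetical, not V8 API:

    #include <cassert>

    // Hypothetical stand-ins; only "later enum value == more general" matters.
    enum Kind { kSmi, kDouble, kObject };
    enum State { kMonomorphic, kPolymorphic };

    struct MiniKeyedIC {
      State state = kMonomorphic;
      Kind cached = kSmi;

      void See(Kind k) {
        if (k == cached) return;
        if (state == kMonomorphic && k > cached) {
          cached = k;            // forward transition: stay monomorphic
        } else {
          state = kPolymorphic;  // otherwise support both maps from now on
        }
      }
    };

    int main() {
      MiniKeyedIC ic;
      ic.See(kDouble);           // the smi array transitioned to double
      assert(ic.state == kMonomorphic && ic.cached == kDouble);
      ic.See(kSmi);              // an untransitioned array shows up after all
      assert(ic.state == kPolymorphic);
      return 0;
    }
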
@@ -1644,8 +1703,7 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
return string_stub();
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
- receiver_map->has_fast_elements() ||
- receiver_map->has_fast_smi_only_elements() ||
+ receiver_map->has_fast_smi_or_object_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1656,17 +1714,16 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
}
-Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
+Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<Map> receiver_map,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
- if (receiver->HasFastElements() ||
- receiver->HasFastSmiOnlyElements() ||
- receiver->HasExternalArrayElements() ||
- receiver->HasFastDoubleElements() ||
- receiver->HasDictionaryElements()) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsDictionaryElementsKind(elements_kind)) {
return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver, stub_kind, strict_mode);
+ receiver_map, stub_kind, strict_mode);
} else {
return generic_stub;
}
@@ -1681,15 +1738,26 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
- break;
case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
- break;
- default:
+ case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ return JSObject::GetElementsTransitionMap(receiver,
+ FAST_HOLEY_ELEMENTS);
+ case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ return JSObject::GetElementsTransitionMap(receiver,
+ FAST_HOLEY_DOUBLE_ELEMENTS);
+ case KeyedIC::LOAD:
+ case KeyedIC::STORE_NO_TRANSITION:
+ case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
UNREACHABLE();
- return Handle<Map>::null();
+ break;
}
+ return Handle<Map>::null();
}
@@ -1749,30 +1817,54 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
if (allow_growth) {
// Handle growing array in stub if necessary.
- if (receiver->HasFastSmiOnlyElements()) {
+ if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+ } else {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+ }
}
if (value->IsHeapObject()) {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
+ } else {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+ }
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+ } else {
+ return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+ }
}
}
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiOnlyElements()) {
+ if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+ } else {
+ return STORE_TRANSITION_SMI_TO_DOUBLE;
+ }
} else if (value->IsHeapObject()) {
- return STORE_TRANSITION_SMI_TO_OBJECT;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
+ } else {
+ return STORE_TRANSITION_SMI_TO_OBJECT;
+ }
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+ } else {
+ return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+ }
}
}
return STORE_NO_TRANSITION;
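
GetStubKind above is a pure function of the receiver's elements kind, its holeyness, whether the store grows the array, and the stored value's type; a smi value never forces a transition. A condensed model under hypothetical types (StubKindFor is not V8 API) that produces the same enum names:

    #include <cstdio>
    #include <string>

    enum Value { kSmiValue, kHeapNumber, kOtherHeapObject };

    std::string StubKindFor(bool smi_elements, bool double_elements,
                            bool holey, bool grows, Value v) {
      std::string base = "NO_TRANSITION";
      const char* h = holey ? "HOLEY_" : "";
      if (smi_elements && v == kHeapNumber)
        base = std::string("TRANSITION_") + h + "SMI_TO_DOUBLE";
      else if (smi_elements && v == kOtherHeapObject)
        base = std::string("TRANSITION_") + h + "SMI_TO_OBJECT";
      else if (double_elements && v == kOtherHeapObject)
        base = std::string("TRANSITION_") + h + "DOUBLE_TO_OBJECT";
      return (grows ? "STORE_AND_GROW_" : "STORE_") + base;
    }

    int main() {
      // Storing a heap number into a holey smi array, in bounds:
      printf("%s\n", StubKindFor(true, false, true, false, kHeapNumber).c_str());
      // -> STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE
      // Storing an object one past the end of a packed double array:
      printf("%s\n", StubKindFor(false, true, false, true, kOtherHeapObject).c_str());
      // -> STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
      return 0;
    }
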
@@ -1819,7 +1911,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
// Update inline cache and stub cache.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ if (FLAG_use_ic && !receiver->IsJSGlobalProxy() &&
+ !(FLAG_harmony_observation && receiver->map()->is_observed())) {
LookupResult lookup(isolate());
if (LookupForWrite(receiver, name, &lookup)) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
@@ -1831,8 +1924,10 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
// Do not use ICs for objects that require access checks (including
- // the global object).
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+ // the global object), or are observed.
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+ !(FLAG_harmony_observation && object->IsJSObject() &&
+ JSObject::cast(*object)->map()->is_observed());
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
@@ -1872,10 +1967,10 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) {
ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(StoreICableLookup(lookup));
+ ASSERT(lookup->IsFound());
+
// These are not cacheable, so we never see such LookupResults here.
- ASSERT(lookup->type() != HANDLER);
- // We get only called for properties or transitions, see StoreICableLookup.
- ASSERT(lookup->type() != NULL_DESCRIPTOR);
+ ASSERT(!lookup->IsHandler());
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1893,21 +1988,25 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
name, receiver, lookup->GetFieldIndex(),
Handle<Map>::null(), strict_mode);
break;
- case MAP_TRANSITION:
- if (lookup->GetAttributes() == NONE) {
- Handle<Map> transition(lookup->GetTransitionMap());
- int index = transition->PropertyIndexFor(*name);
+ case TRANSITION: {
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD && details.attributes() == NONE) {
+ int field_index = target_descriptors->GetFieldIndex(descriptor);
code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, index, transition, strict_mode);
+ name, receiver, field_index, transition, strict_mode);
break;
}
// fall through.
+ }
case NORMAL:
case CONSTANT_FUNCTION:
case CALLBACKS:
case INTERCEPTOR:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
code = (strict_mode == kStrictMode)
@@ -1915,7 +2014,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
: generic_stub();
break;
case HANDLER:
- case NULL_DESCRIPTOR:
+ case NONEXISTENT:
UNREACHABLE();
return;
}
@@ -2050,7 +2149,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
// The length property has to be a writable callback property.
LookupResult debug_lookup(isolate);
receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
- ASSERT(debug_lookup.type() == CALLBACKS && !debug_lookup.IsReadOnly());
+ ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
Object* result;
@@ -2491,7 +2590,8 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
Token::Value CompareIC::ComputeOperation(Code* target) {
ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(target->compare_operation());
+ return static_cast<Token::Value>(
+ target->compare_operation() + Token::EQ);
}
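
The one-line fix above appears to assume the comparison operation is packed into the code object biased by Token::EQ, so that EQ encodes as zero and the field stays small; decoding must add the bias back. A self-contained illustration with made-up token values:

    #include <cassert>

    enum Token { EQ = 50, NE, LT, GT, LTE, GTE };  // illustrative values only

    int EncodeCompareOp(Token op) { return op - EQ; }  // what gets stored
    Token DecodeCompareOp(int stored) { return static_cast<Token>(stored + EQ); }

    int main() {
      assert(EncodeCompareOp(EQ) == 0);                       // EQ is the zero point
      assert(DecodeCompareOp(EncodeCompareOp(GTE)) == GTE);   // round-trips
      return 0;
    }
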
@@ -2501,7 +2601,7 @@ const char* CompareIC::GetStateName(State state) {
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
- case KNOWN_OBJECTS: return "OBJECTS";
+ case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
case SYMBOLS: return "SYMBOLS";
case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
diff --git a/src/3rdparty/v8/src/ic.h b/src/3rdparty/v8/src/ic.h
index 36af768..389c845 100644
--- a/src/3rdparty/v8/src/ic.h
+++ b/src/3rdparty/v8/src/ic.h
@@ -111,7 +111,7 @@ class IC {
RelocInfo::Mode ComputeMode();
bool IsQmlGlobal(Handle<Object> receiver) {
- JSObject* qml_global = isolate_->context()->qml_global();
+ JSObject* qml_global = isolate_->context()->qml_global_object();
return !qml_global->IsUndefined() && qml_global == *receiver;
}
@@ -384,10 +384,16 @@ class KeyedIC: public IC {
STORE_TRANSITION_SMI_TO_OBJECT,
STORE_TRANSITION_SMI_TO_DOUBLE,
STORE_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
STORE_AND_GROW_NO_TRANSITION,
STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
};
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
@@ -451,7 +457,7 @@ class KeyedIC: public IC {
private:
void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
- Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicStub(Handle<Map> receiver_map,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> default_stub);
@@ -467,6 +473,12 @@ class KeyedIC: public IC {
static bool IsGrowStubKind(StubKind stub_kind) {
return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
}
+
+ static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
+ if (!IsTransitionStubKind(stub_kind)) return stub_kind;
+ if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
+ return STORE_NO_TRANSITION;
+ }
};
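
The StubKind enum above is laid out so every grow kind sits at one fixed offset, kGrowICDelta, from its in-bounds twin; that is what lets GetNoTransitionStubKind collapse any kind with two comparisons and makes grow lookups a single addition. A sketch of the layout invariant with illustrative values (only the relative spacing matters):

    #include <cassert>

    enum StubKind {
      LOAD,
      STORE_NO_TRANSITION,
      STORE_TRANSITION_A,            // stands in for the six STORE_TRANSITION_* kinds
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_A    // stands in for the six grow variants
    };

    const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - STORE_NO_TRANSITION;

    StubKind GetGrowStubKind(StubKind k) {
      // Any in-bounds store kind maps to its grow twin by a single addition.
      assert(k >= STORE_NO_TRANSITION && k < STORE_AND_GROW_NO_TRANSITION);
      return static_cast<StubKind>(k + kGrowICDelta);
    }

    int main() {
      assert(GetGrowStubKind(STORE_NO_TRANSITION) == STORE_AND_GROW_NO_TRANSITION);
      assert(GetGrowStubKind(STORE_TRANSITION_A) == STORE_AND_GROW_TRANSITION_A);
      return 0;
    }
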
@@ -625,6 +637,18 @@ class StoreIC: public IC {
};
+enum KeyedStoreCheckMap {
+ kDontCheckMap,
+ kCheckMap
+};
+
+
+enum KeyedStoreIncrementLength {
+ kDontIncrementLength,
+ kIncrementLength
+};
+
+
class KeyedStoreIC: public KeyedIC {
public:
explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
@@ -632,7 +656,7 @@ class KeyedStoreIC: public KeyedIC {
}
MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
+ StrictModeFlag strict_mode,
Handle<Object> object,
Handle<Object> name,
Handle<Object> value,
diff --git a/src/3rdparty/v8/src/incremental-marking-inl.h b/src/3rdparty/v8/src/incremental-marking-inl.h
index 2dae6f2..bbe9a9d 100644
--- a/src/3rdparty/v8/src/incremental-marking-inl.h
+++ b/src/3rdparty/v8/src/incremental-marking-inl.h
@@ -48,7 +48,9 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
// Object is either grey or white. It will be scanned if it survives.
return false;
}
- return true;
+ if (!is_compacting_) return false;
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ return Marking::IsBlack(obj_bit);
}
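
After this change, BaseRecordWrite's result directly answers whether the compactor needs the slot: a white value re-greys a black host (which will then be rescanned), so only non-white values stored into black, already-scanned hosts during compaction are worth buffering. A stripped-down model of that decision (Color and ShouldRecordSlot are stand-ins, not V8 types):

    #include <cassert>

    enum Color { kWhite, kGrey, kBlack };

    bool ShouldRecordSlot(bool is_compacting, Color host, Color value) {
      if (value == kWhite) return false;  // the host is re-greyed and rescanned
      if (!is_compacting) return false;   // no evacuation, no slot buffer
      return host == kBlack;              // grey/white hosts get rescanned anyway
    }

    int main() {
      assert(ShouldRecordSlot(true, kBlack, kGrey));
      assert(!ShouldRecordSlot(false, kBlack, kGrey));
      assert(!ShouldRecordSlot(true, kGrey, kBlack));
      return 0;
    }
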
@@ -107,9 +109,9 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
// trace it. In this case we switch to non-incremental marking in
// order to finish off this marking phase.
if (FLAG_trace_gc) {
- PrintF("Hurrying incremental marking because of lack of progress\n");
+ PrintPID("Hurrying incremental marking because of lack of progress\n");
}
- allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+ marking_speed_ = kMaxMarkingSpeed;
}
}
@@ -123,27 +125,6 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
}
-bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- WhiteToGreyAndPush(obj, mark_bit);
- return true;
- }
- return false;
-}
-
-
-bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- return true;
- }
- return false;
-}
-
-
} } // namespace v8::internal
#endif // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/3rdparty/v8/src/incremental-marking.cc b/src/3rdparty/v8/src/incremental-marking.cc
index 94afffa..b34d6d9 100644
--- a/src/3rdparty/v8/src/incremental-marking.cc
+++ b/src/3rdparty/v8/src/incremental-marking.cc
@@ -31,6 +31,8 @@
#include "code-stubs.h"
#include "compilation-cache.h"
+#include "objects-visiting.h"
+#include "objects-visiting-inl.h"
#include "v8conversions.h"
namespace v8 {
@@ -42,7 +44,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
state_(STOPPED),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(false),
- marker_(this, heap->mark_compact_collector()),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@@ -51,7 +52,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
steps_count_since_last_gc_(0),
steps_took_since_last_gc_(0),
should_hurry_(false),
- allocation_marking_factor_(0),
+ marking_speed_(0),
allocated_(0),
no_marking_scope_depth_(0) {
}
@@ -65,7 +66,7 @@ void IncrementalMarking::TearDown() {
void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+ if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
// Object is not going to be rescanned, so we need to record the slot.
@@ -80,17 +81,19 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Object* value,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
-
- // Fast cases should already be covered by RecordWriteStub.
- ASSERT(value->IsHeapObject());
- ASSERT(!value->IsHeapNumber());
- ASSERT(!value->IsString() ||
- value->IsConsString() ||
- value->IsSlicedString());
- ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
-
IncrementalMarking* marking = isolate->heap()->incremental_marking();
ASSERT(!marking->is_compacting_);
+
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ int counter = chunk->write_barrier_counter();
+ if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+ marking->write_barriers_invoked_since_last_step_ +=
+ MemoryChunk::kWriteBarrierCounterGranularity -
+ chunk->write_barrier_counter();
+ chunk->set_write_barrier_counter(
+ MemoryChunk::kWriteBarrierCounterGranularity);
+ }
+
marking->RecordWrite(obj, NULL, value);
}
@@ -98,8 +101,20 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
Object** slot,
Isolate* isolate) {
+ ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
ASSERT(marking->is_compacting_);
+
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ int counter = chunk->write_barrier_counter();
+ if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+ marking->write_barriers_invoked_since_last_step_ +=
+ MemoryChunk::kWriteBarrierCounterGranularity -
+ chunk->write_barrier_counter();
+ chunk->set_write_barrier_counter(
+ MemoryChunk::kWriteBarrierCounterGranularity);
+ }
+
marking->RecordWrite(obj, slot, *slot);
}
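
Both stubs above meter write-barrier pressure the same way: generated code decrements a per-page counter, and once it drops below half of kWriteBarrierCounterGranularity the slow path credits the consumed budget to write_barriers_invoked_since_last_step_ and refills the counter. A toy version of that amortization (the granularity value here is illustrative):

    #include <cassert>

    const int kWriteBarrierCounterGranularity = 500;

    struct Chunk { int write_barrier_counter = kWriteBarrierCounterGranularity; };

    struct Marking {
      long write_barriers_invoked_since_last_step = 0;

      void OnSlowBarrier(Chunk* chunk) {
        if (chunk->write_barrier_counter < kWriteBarrierCounterGranularity / 2) {
          // Credit everything consumed since the last refill, then refill.
          write_barriers_invoked_since_last_step +=
              kWriteBarrierCounterGranularity - chunk->write_barrier_counter;
          chunk->write_barrier_counter = kWriteBarrierCounterGranularity;
        }
      }
    };

    int main() {
      Chunk c;
      Marking m;
      c.write_barrier_counter = 100;  // generated code burned 400 barriers
      m.OnSlowBarrier(&c);
      assert(m.write_barriers_invoked_since_last_step == 400);
      assert(c.write_barrier_counter == kWriteBarrierCounterGranularity);
      return 0;
    }
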
@@ -125,9 +140,9 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
- Object** slot,
- Code* value) {
- if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+ Object** slot,
+ Code* value) {
+ if (BaseRecordWrite(host, slot, value)) {
ASSERT(slot != NULL);
heap_->mark_compact_collector()->
RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
@@ -160,93 +175,92 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
}
-class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
- public:
- IncrementalMarkingMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- if (target->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- MarkObject(target);
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+ if (Marking::IsBlack(mark_bit)) {
+ MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+ -heap_obj->Size());
}
+ Marking::AnyToGrey(mark_bit);
}
+}
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_age() != heap_->global_ic_age())) {
- IC::Clear(rinfo->pc());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- }
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
- }
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
+class IncrementalMarkingMarkingVisitor
+ : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+ public:
+ static void Initialize() {
+ StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
+
+ table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
+ table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
- void VisitCodeEntry(Address entry_address) {
- Object* target = Code::GetObjectFromEntryAddress(entry_address);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, Code::cast(target));
- MarkObject(target);
+ static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
+ Context* context = Context::cast(object);
+
+ // We will mark the cache black with a separate pass
+ // when we finish marking.
+ MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
+ VisitNativeContext(map, context);
}
- void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
- if (shared->ic_age() != heap_->global_ic_age()) {
- shared->ResetForNewContext(heap_->global_ic_age());
- }
+ static void VisitJSWeakMap(Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ VisitPointers(heap,
+ HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
+ HeapObject::RawField(object, JSWeakMap::kSize));
}
- void VisitPointer(Object** p) {
+ static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
+
+ INLINE(static void VisitPointer(Heap* heap, Object** p)) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(p, p, obj);
- MarkObject(obj);
+ heap->mark_compact_collector()->RecordSlot(p, p, obj);
+ MarkObject(heap, obj);
}
}
- void VisitPointers(Object** start, Object** end) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
for (Object** p = start; p < end; p++) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(start, p, obj);
- MarkObject(obj);
+ heap->mark_compact_collector()->RecordSlot(start, p, obj);
+ MarkObject(heap, obj);
}
}
}
- private:
- // Mark object pointed to by p.
- INLINE(void MarkObject(Object* obj)) {
+ // Marks the object grey and pushes it on the marking stack.
+ INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+ if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
heap_object->Size());
}
} else if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
}
- Heap* heap_;
- IncrementalMarking* incremental_marking_;
+ // Marks the object black without pushing it on the marking stack.
+ // Returns true if object needed marking and false otherwise.
+ INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+ HeapObject* heap_object = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+ if (Marking::IsWhite(mark_bit)) {
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+ heap_object->Size());
+ return true;
+ }
+ return false;
+ }
};
@@ -290,6 +304,11 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
};
+void IncrementalMarking::Initialize() {
+ IncrementalMarkingMarkingVisitor::Initialize();
+}
+
+
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
bool is_marking,
bool is_compacting) {
@@ -498,19 +517,6 @@ void IncrementalMarking::Start() {
}
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
- -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
- }
-}
-
-
void IncrementalMarking::StartMarking(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start marking\n");
@@ -537,8 +543,8 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
ActivateIncrementalWriteBarrier();
-#ifdef DEBUG
// Marking bits are cleared by the sweeper.
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
}
@@ -623,20 +629,50 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
}
-void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+
+ MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+ SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+ Marking::MarkBlack(obj_mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+}
- MarkCompactCollector* collector = heap_->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
+
+void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+ Map* filler_map = heap_->one_pointer_filler_map();
+ while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ int size = obj->SizeFromMap(map);
+ bytes_to_process -= size;
+ VisitObject(map, obj, size);
+ }
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque() {
+ Map* filler_map = heap_->one_pointer_filler_map();
+ while (!marking_deque_.IsEmpty()) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ VisitObject(map, obj, obj->SizeFromMap(map));
}
}
@@ -650,45 +686,7 @@ void IncrementalMarking::Hurry() {
}
// TODO(gc) hurry can mark objects it encounters black, as the mutator
// was stopped.
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
- while (!marking_deque_.IsEmpty()) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) {
- continue;
- } else if (map == global_context_map) {
- // Global contexts have weak fields.
- VisitGlobalContext(Context::cast(obj), &marking_visitor);
- } else if (map->instance_type() == MAP_TYPE) {
- Map* map = Map::cast(obj);
- heap_->ClearCacheOnMap(map);
-
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
- } else {
- marking_visitor.VisitPointers(
- HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
- }
- } else {
- obj->Iterate(&marking_visitor);
- }
-
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(!Marking::IsBlack(mark_bit));
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- }
+ ProcessMarkingDeque();
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = OS::TimeCurrentMillis();
@@ -704,7 +702,7 @@ void IncrementalMarking::Hurry() {
PolymorphicCodeCache::kSize);
}
- Object* context = heap_->global_contexts_list();
+ Object* context = heap_->native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -794,11 +792,25 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
allocated_ += allocated_bytes;
- if (allocated_ < kAllocatedThreshold) return;
+ if (allocated_ < kAllocatedThreshold &&
+ write_barriers_invoked_since_last_step_ <
+ kWriteBarriersInvokedThreshold) {
+ return;
+ }
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
- intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+ // The marking speed is driven either by the allocation rate or by the rate
+ // at which we are having to check the color of objects in the write barrier.
+ // It is possible for a tight non-allocating loop to run a lot of write
+ // barriers before we get here and check them (marking can only take place on
+ // allocation), so to reduce the lumpiness we don't use the write barriers
+ // invoked since last step directly to determine the amount of work to do.
+ intptr_t bytes_to_process =
+ marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+ allocated_ = 0;
+ write_barriers_invoked_since_last_step_ = 0;
+
bytes_scanned_ += bytes_to_process;
double start = 0;
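
The budget computed above is marking_speed_ times whichever driver fired, with the allocation figure clamped from below by kWriteBarriersInvokedThreshold so that a barrier-heavy but allocation-free phase still gets full-sized, even steps. Worked numbers under this patch's constants:

    #include <algorithm>
    #include <cassert>

    int main() {
      const long kWriteBarriersInvokedThreshold = 65536;
      const long marking_speed = 3;      // current speed factor

      // Allocation-driven step: 200 KB allocated since the last step.
      long allocated = 200 * 1024;
      long budget = marking_speed *
          std::max(allocated, kWriteBarriersInvokedThreshold);
      assert(budget == 3 * 200 * 1024);  // 600 KB worth of marking

      // Barrier-driven step: almost no allocation, the write-barrier counter
      // tripped the threshold instead; the step is still full-sized.
      allocated = 512;
      budget = marking_speed * std::max(allocated, kWriteBarriersInvokedThreshold);
      assert(budget == 3 * 65536);
      return 0;
    }
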
@@ -813,87 +825,19 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
-
- // TODO(gc) switch to static visitor instead of normal visitor.
- if (map == global_context_map) {
- // Global contexts have weak fields.
- Context* ctx = Context::cast(obj);
-
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
- VisitGlobalContext(ctx, &marking_visitor);
- } else if (map->instance_type() == MAP_TYPE) {
- Map* map = Map::cast(obj);
- heap_->ClearCacheOnMap(map);
-
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
- } else {
- marking_visitor.VisitPointers(
- HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
- }
- } else if (map->instance_type() == JS_FUNCTION_TYPE) {
- marking_visitor.VisitPointers(
- HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
- HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));
-
- marking_visitor.VisitCodeEntry(
- obj->address() + JSFunction::kCodeEntryOffset);
-
- marking_visitor.VisitPointers(
- HeapObject::RawField(obj,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(obj,
- JSFunction::kNonWeakFieldsEndOffset));
- } else {
- obj->IterateBody(map->instance_type(), size, &marking_visitor);
- }
-
- MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
- Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
- }
+ ProcessMarkingDeque(bytes_to_process);
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
- allocated_ = 0;
-
steps_count_++;
steps_count_since_last_gc_++;
bool speed_up = false;
- if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+ if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
if (FLAG_trace_gc) {
- PrintF("Speed up marking after %d steps\n",
- static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+ PrintPID("Speed up marking after %d steps\n",
+ static_cast<int>(kMarkingSpeedAccellerationInterval));
}
speed_up = true;
}
@@ -902,35 +846,35 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
(old_generation_space_available_at_start_of_incremental_ < 10 * MB);
bool only_1_nth_of_space_that_was_available_still_left =
- (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+ (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
old_generation_space_available_at_start_of_incremental_);
if (space_left_is_very_small ||
only_1_nth_of_space_that_was_available_still_left) {
- if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+ if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
speed_up = true;
}
bool size_of_old_space_multiplied_by_n_during_marking =
(heap_->PromotedTotalSize() >
- (allocation_marking_factor_ + 1) *
+ (marking_speed_ + 1) *
old_generation_space_used_at_start_of_incremental_);
if (size_of_old_space_multiplied_by_n_during_marking) {
speed_up = true;
if (FLAG_trace_gc) {
- PrintF("Speed up marking because of heap size increase\n");
+ PrintPID("Speed up marking because of heap size increase\n");
}
}
int64_t promoted_during_marking = heap_->PromotedTotalSize()
- old_generation_space_used_at_start_of_incremental_;
- intptr_t delay = allocation_marking_factor_ * MB;
+ intptr_t delay = marking_speed_ * MB;
intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
// We try to scan at at least twice the speed that we are allocating.
if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
if (FLAG_trace_gc) {
- PrintF("Speed up marking because marker was not keeping up\n");
+ PrintPID("Speed up marking because marker was not keeping up\n");
}
speed_up = true;
}
@@ -938,15 +882,15 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if (speed_up) {
if (state_ != MARKING) {
if (FLAG_trace_gc) {
- PrintF("Postponing speeding up marking until marking starts\n");
+ PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
- allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
- allocation_marking_factor_ = static_cast<int>(
- Min(kMaxAllocationMarkingFactor,
- static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+ marking_speed_ += kMarkingSpeedAccelleration;
+ marking_speed_ = static_cast<int>(
+ Min(kMaxMarkingSpeed,
+ static_cast<intptr_t>(marking_speed_ * 1.3)));
if (FLAG_trace_gc) {
- PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+ PrintPID("Marking speed increased to %d\n", marking_speed_);
}
}
}
@@ -972,8 +916,9 @@ void IncrementalMarking::ResetStepCounters() {
steps_count_since_last_gc_ = 0;
steps_took_since_last_gc_ = 0;
bytes_rescanned_ = 0;
- allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+ marking_speed_ = kInitialMarkingSpeed;
bytes_scanned_ = 0;
+ write_barriers_invoked_since_last_step_ = 0;
}
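
Each speed-up above adds a small constant, multiplies by 1.3 and clamps at kMaxMarkingSpeed, so the marker ramps up geometrically rather than linearly. A sketch of the schedule those constants produce:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const long kMarkingSpeedAccelleration = 2;  // spelling as in the source
      const long kMaxMarkingSpeed = 1000;
      long speed = 1;                             // kInitialMarkingSpeed
      int speed_ups = 0;
      while (speed < kMaxMarkingSpeed) {
        speed = std::min(kMaxMarkingSpeed,
                         static_cast<long>((speed + kMarkingSpeedAccelleration) * 1.3));
        ++speed_ups;
      }
      // With these constants the cap is reached after 19 speed-ups, i.e. after
      // roughly 19 * 1024 marking steps at the default interval.
      printf("cap reached after %d speed-ups\n", speed_ups);
      return 0;
    }
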
diff --git a/src/3rdparty/v8/src/incremental-marking.h b/src/3rdparty/v8/src/incremental-marking.h
index 39e8dae..6ae0f59 100644
--- a/src/3rdparty/v8/src/incremental-marking.h
+++ b/src/3rdparty/v8/src/incremental-marking.h
@@ -53,6 +53,8 @@ class IncrementalMarking {
explicit IncrementalMarking(Heap* heap);
+ static void Initialize();
+
void TearDown();
State state() {
@@ -93,21 +95,23 @@ class IncrementalMarking {
// progress in the face of the mutator creating new work for it. We start
// off at a moderate rate of work and gradually increase the speed of the
// incremental marker until it completes.
- // Do some marking every time this much memory has been allocated.
+ // Do some marking every time this much memory has been allocated or that many
+ // heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 65536;
// Start off by marking this many times more memory than has been allocated.
- static const intptr_t kInitialAllocationMarkingFactor = 1;
+ static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
// with the data that is entering the old space through promotion.
static const intptr_t kFastMarking = 3;
// After this many steps we increase the marking/allocating factor.
- static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+ static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
// This is how much we increase the marking/allocating factor by.
- static const intptr_t kAllocationMarkingFactorSpeedup = 2;
- static const intptr_t kMaxAllocationMarkingFactor = 1000;
+ static const intptr_t kMarkingSpeedAccelleration = 2;
+ static const intptr_t kMaxMarkingSpeed = 1000;
void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+ Step(allocated * kFastMarking / kInitialMarkingSpeed,
GC_VIA_STACK_GUARD);
}
@@ -130,6 +134,12 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
+ // Record a slot for compaction. Returns false for objects that are
+ // guaranteed to be rescanned or not guaranteed to survive.
+ //
+ // No slots in white objects should be recorded, as some slots are typed and
+ // cannot be interpreted correctly if the underlying object does not survive
+ // the incremental cycle (stays white).
INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj,
@@ -167,16 +177,6 @@ class IncrementalMarking {
return true;
}
- // Marks the object grey and pushes it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for incremental marking only.
- INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
inline int steps_count() {
return steps_count_;
}
@@ -213,12 +213,13 @@ class IncrementalMarking {
void NotifyOfHighPromotionRate() {
if (IsMarking()) {
- if (allocation_marking_factor_ < kFastMarking) {
+ if (marking_speed_ < kFastMarking) {
if (FLAG_trace_gc) {
- PrintF("Increasing marking speed to %d due to high promotion rate\n",
- static_cast<int>(kFastMarking));
+ PrintPID("Increasing marking speed to %d "
+ "due to high promotion rate\n",
+ static_cast<int>(kFastMarking));
}
- allocation_marking_factor_ = kFastMarking;
+ marking_speed_ = kFastMarking;
}
}
}
@@ -258,7 +259,11 @@ class IncrementalMarking {
void EnsureMarkingDequeIsCommitted();
- void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+ INLINE(void ProcessMarkingDeque());
+
+ INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
+
+ INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
Heap* heap_;
@@ -268,7 +273,6 @@ class IncrementalMarking {
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
- Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;
@@ -279,9 +283,10 @@ class IncrementalMarking {
double steps_took_since_last_gc_;
int64_t bytes_rescanned_;
bool should_hurry_;
- int allocation_marking_factor_;
+ int marking_speed_;
intptr_t bytes_scanned_;
intptr_t allocated_;
+ intptr_t write_barriers_invoked_since_last_step_;
int no_marking_scope_depth_;
diff --git a/src/3rdparty/v8/src/interface.cc b/src/3rdparty/v8/src/interface.cc
index 7836110..336be82 100644
--- a/src/3rdparty/v8/src/interface.cc
+++ b/src/3rdparty/v8/src/interface.cc
@@ -41,11 +41,13 @@ static bool Match(void* key1, void* key2) {
}
-Interface* Interface::Lookup(Handle<String> name) {
+Interface* Interface::Lookup(Handle<String> name, Zone* zone) {
ASSERT(IsModule());
ZoneHashMap* map = Chase()->exports_;
if (map == NULL) return NULL;
- ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false);
+ ZoneAllocationPolicy allocator(zone);
+ ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false,
+ allocator);
if (p == NULL) return NULL;
ASSERT(*static_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);
@@ -69,7 +71,7 @@ int Nesting::current_ = 0;
void Interface::DoAdd(
- void* name, uint32_t hash, Interface* interface, bool* ok) {
+ void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok) {
MakeModule(ok);
if (!*ok) return;
@@ -85,9 +87,13 @@ void Interface::DoAdd(
#endif
ZoneHashMap** map = &Chase()->exports_;
- if (*map == NULL) *map = new ZoneHashMap(Match, 8);
+ ZoneAllocationPolicy allocator(zone);
- ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen());
+ if (*map == NULL)
+ *map = new ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity,
+ allocator);
+
+ ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
if (p == NULL) {
// This didn't have name but was frozen already, that's an error.
*ok = false;
@@ -97,7 +103,7 @@ void Interface::DoAdd(
#ifdef DEBUG
Nesting nested;
#endif
- static_cast<Interface*>(p->value)->Unify(interface, ok);
+ static_cast<Interface*>(p->value)->Unify(interface, zone, ok);
}
#ifdef DEBUG
@@ -110,16 +116,24 @@ void Interface::DoAdd(
}
-void Interface::Unify(Interface* that, bool* ok) {
- if (this->forward_) return this->Chase()->Unify(that, ok);
- if (that->forward_) return this->Unify(that->Chase(), ok);
+void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
+ if (this->forward_) return this->Chase()->Unify(that, zone, ok);
+ if (that->forward_) return this->Unify(that->Chase(), zone, ok);
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
*ok = true;
if (this == that) return;
- if (this->IsValue()) return that->MakeValue(ok);
- if (that->IsValue()) return this->MakeValue(ok);
+ if (this->IsValue()) {
+ that->MakeValue(ok);
+ if (*ok && this->IsConst()) that->MakeConst(ok);
+ return;
+ }
+ if (that->IsValue()) {
+ this->MakeValue(ok);
+ if (*ok && that->IsConst()) this->MakeConst(ok);
+ return;
+ }
#ifdef DEBUG
if (FLAG_print_interface_details) {
@@ -134,9 +148,9 @@ void Interface::Unify(Interface* that, bool* ok) {
// Merge the smaller interface into the larger, for performance.
if (this->exports_ != NULL && (that->exports_ == NULL ||
this->exports_->occupancy() >= that->exports_->occupancy())) {
- this->DoUnify(that, ok);
+ this->DoUnify(that, ok, zone);
} else {
- that->DoUnify(this, ok);
+ that->DoUnify(this, ok, zone);
}
#ifdef DEBUG
@@ -151,7 +165,7 @@ void Interface::Unify(Interface* that, bool* ok) {
}
-void Interface::DoUnify(Interface* that, bool* ok) {
+void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
@@ -166,7 +180,7 @@ void Interface::DoUnify(Interface* that, bool* ok) {
ZoneHashMap* map = that->exports_;
if (map != NULL) {
for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
- this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), ok);
+ this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), zone, ok);
if (!*ok) return;
}
}
@@ -208,6 +222,8 @@ void Interface::Print(int n) {
if (IsUnknown()) {
PrintF("unknown\n");
+ } else if (IsConst()) {
+ PrintF("const\n");
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
diff --git a/src/3rdparty/v8/src/interface.h b/src/3rdparty/v8/src/interface.h
index 580f082..94ef11b 100644
--- a/src/3rdparty/v8/src/interface.h
+++ b/src/3rdparty/v8/src/interface.h
@@ -36,29 +36,45 @@ namespace internal {
// This class implements the following abstract grammar of interfaces
// (i.e. module types):
-// interface ::= UNDETERMINED | VALUE | MODULE(exports)
+// interface ::= UNDETERMINED | VALUE | CONST | MODULE(exports)
// exports ::= {name : interface, ...}
-// A frozen module type is one that is fully determined. Unification does not
-// allow adding additional exports to frozen interfaces.
-// Otherwise, unifying modules merges their exports.
+// A frozen type is one that is fully determined. Unification does not
+// allow turning non-const values into const, or adding additional exports to
+// frozen interfaces. Otherwise, unifying modules merges their exports.
// Undetermined types are unification variables that can be unified freely.
+// There is a natural subsort lattice that reflects the increase of knowledge:
+//
+// undetermined
+// // | \\ .
+// value (frozen) module
+// // \\ / \ //
+// const fr.value fr.module
+// \\ /
+// fr.const
+//
+// where the bold lines are the only transitions allowed.
class Interface : public ZoneObject {
public:
// ---------------------------------------------------------------------------
// Factory methods.
+ static Interface* NewUnknown(Zone* zone) {
+ return new(zone) Interface(NONE);
+ }
+
static Interface* NewValue() {
static Interface value_interface(VALUE + FROZEN); // Cached.
return &value_interface;
}
- static Interface* NewUnknown() {
- return new Interface(NONE);
+ static Interface* NewConst() {
+ static Interface value_interface(VALUE + CONST + FROZEN); // Cached.
+ return &value_interface;
}
- static Interface* NewModule() {
- return new Interface(MODULE);
+ static Interface* NewModule(Zone* zone) {
+ return new(zone) Interface(MODULE);
}
// ---------------------------------------------------------------------------
@@ -66,13 +82,13 @@ class Interface : public ZoneObject {
// Add a name to the list of exports. If it already exists, unify with
// interface, otherwise insert unless this is closed.
- void Add(Handle<String> name, Interface* interface, bool* ok) {
- DoAdd(name.location(), name->Hash(), interface, ok);
+ void Add(Handle<String> name, Interface* interface, Zone* zone, bool* ok) {
+ DoAdd(name.location(), name->Hash(), interface, zone, ok);
}
// Unify with another interface. If successful, both interface objects will
// represent the same type, and changes to one are reflected in the other.
- void Unify(Interface* that, bool* ok);
+ void Unify(Interface* that, Zone* zone, bool* ok);
// Determine this interface to be a value interface.
void MakeValue(bool* ok) {
@@ -80,6 +96,12 @@ class Interface : public ZoneObject {
if (*ok) Chase()->flags_ |= VALUE;
}
+ // Determine this interface to be an immutable interface.
+ void MakeConst(bool* ok) {
+ *ok = !IsModule() && (IsConst() || !IsFrozen());
+ if (*ok) Chase()->flags_ |= VALUE + CONST;
+ }
+
// Determine this interface to be a module interface.
void MakeModule(bool* ok) {
*ok = !IsValue();
@@ -107,6 +129,9 @@ class Interface : public ZoneObject {
// Check whether this is a value type.
bool IsValue() { return Chase()->flags_ & VALUE; }
+ // Check whether this is a constant type.
+ bool IsConst() { return Chase()->flags_ & CONST; }
+
// Check whether this is a module type.
bool IsModule() { return Chase()->flags_ & MODULE; }
@@ -116,7 +141,7 @@ class Interface : public ZoneObject {
Handle<JSModule> Instance() { return Chase()->instance_; }
// Look up an exported name. Returns NULL if not (yet) defined.
- Interface* Lookup(Handle<String> name);
+ Interface* Lookup(Handle<String> name, Zone* zone);
// ---------------------------------------------------------------------------
// Iterators.
@@ -161,8 +186,9 @@ class Interface : public ZoneObject {
enum Flags { // All flags are monotonic
NONE = 0,
VALUE = 1, // This type describes a value
- MODULE = 2, // This type describes a module
- FROZEN = 4 // This type is fully determined
+ CONST = 2, // This type describes a constant
+ MODULE = 4, // This type describes a module
+ FROZEN = 8 // This type is fully determined
};
int flags_;
@@ -187,8 +213,9 @@ class Interface : public ZoneObject {
return result;
}
- void DoAdd(void* name, uint32_t hash, Interface* interface, bool* ok);
- void DoUnify(Interface* that, bool* ok);
+ void DoAdd(void* name, uint32_t hash, Interface* interface, Zone* zone,
+ bool* ok);
+ void DoUnify(Interface* that, bool* ok, Zone* zone);
};
} } // namespace v8::internal
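
A compact model of the lattice sketched in the comment above, reusing the patch's flag values: VALUE, CONST and MODULE are only ever set (the flags are monotonic), value-ness and module-ness exclude each other, and FROZEN forbids the value-to-const step. MiniInterface is a sketch, not V8's Interface:

    #include <cassert>

    enum Flags { NONE = 0, VALUE = 1, CONST = 2, MODULE = 4, FROZEN = 8 };

    struct MiniInterface {
      int flags = NONE;
      bool MakeValue() {
        if (flags & MODULE) return false;
        flags |= VALUE;
        return true;
      }
      bool MakeConst() {
        // Freezing fixes const-ness: a frozen plain value cannot become const.
        if ((flags & MODULE) || ((flags & FROZEN) && !(flags & CONST))) return false;
        flags |= VALUE | CONST;
        return true;
      }
      bool MakeModule() {
        if (flags & VALUE) return false;
        flags |= MODULE;
        return true;
      }
    };

    int main() {
      MiniInterface i;
      assert(i.MakeValue());              // undetermined -> value
      assert(i.MakeConst());              // value -> const while still unfrozen
      assert(!i.MakeModule());            // a value can never become a module

      MiniInterface frozen_value;
      frozen_value.flags = VALUE | FROZEN;
      assert(!frozen_value.MakeConst());  // the frozen transition is forbidden
      return 0;
    }
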
diff --git a/src/3rdparty/v8/src/isolate.cc b/src/3rdparty/v8/src/isolate.cc
index 63029ab..3a6099b 100644
--- a/src/3rdparty/v8/src/isolate.cc
+++ b/src/3rdparty/v8/src/isolate.cc
@@ -257,7 +257,7 @@ void Isolate::PreallocatedStorageInit(size_t size) {
void* Isolate::PreallocatedStorageNew(size_t size) {
if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy::New(size);
+ return FreeStoreAllocationPolicy().New(size);
}
ASSERT(free_list_.next_ != &free_list_);
ASSERT(free_list_.previous_ != &free_list_);
@@ -478,6 +478,14 @@ void Isolate::Iterate(ObjectVisitor* v) {
Iterate(v, current_t);
}
+void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
+ for (DeferredHandles* deferred = deferred_handles_head_;
+ deferred != NULL;
+ deferred = deferred->next_) {
+ deferred->Iterate(visitor);
+ }
+}
+
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
// The ARM simulator has a separate JS stack. We therefore register
@@ -528,6 +536,24 @@ Handle<String> Isolate::StackTraceString() {
}
+void Isolate::PushStackTraceAndDie(unsigned int magic,
+ Object* object,
+ Map* map,
+ unsigned int magic2) {
+ const int kMaxStackTraceSize = 8192;
+ Handle<String> trace = StackTraceString();
+ char buffer[kMaxStackTraceSize];
+ int length = Min(kMaxStackTraceSize - 1, trace->length());
+ String::WriteToFlat(*trace, buffer, 0, length);
+ buffer[length] = '\0';
+ OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
+ magic, magic2,
+ static_cast<void*>(object), static_cast<void*>(map),
+ buffer);
+ OS::Abort();
+}
+
+
void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
@@ -549,8 +575,6 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
Handle<String> column_key = factory()->LookupAsciiSymbol("column");
Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
- Handle<String> name_or_source_url_key =
- factory()->LookupAsciiSymbol("nameOrSourceURL");
Handle<String> script_name_or_source_url_key =
factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
@@ -610,18 +634,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name(), this);
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = factory()->undefined_value();
- }
+ Handle<Object> result = GetScriptNameOrSourceURL(script);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, script_name_or_source_url_key,
@@ -744,7 +757,7 @@ void Isolate::SetFailedAccessCheckCallback(
thread_local_top()->failed_access_check_callback_ = callback;
}
-
+
void Isolate::SetUserObjectComparisonCallback(
v8::UserObjectComparisonCallback callback) {
thread_local_top()->user_object_comparison_callback_ = callback;
@@ -788,16 +801,17 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
if (isolate->bootstrapper()->IsActive()) return YES;
if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+ Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context();
if (!receiver_context->IsContext()) return NO;
- // Get the global context of current top context.
- // avoid using Isolate::global_context() because it uses Handle.
- Context* global_context = isolate->context()->global()->global_context();
- if (receiver_context == global_context) return YES;
+ // Get the native context of current top context.
+ // avoid using Isolate::native_context() because it uses Handle.
+ Context* native_context =
+ isolate->context()->global_object()->native_context();
+ if (receiver_context == native_context) return YES;
if (Context::cast(receiver_context)->security_token() ==
- global_context->security_token())
+ native_context->security_token())
return YES;
}
@@ -928,7 +942,7 @@ Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
}
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+Failure* Isolate::ReThrow(MaybeObject* exception) {
bool can_be_caught_externally = false;
bool catchable_by_javascript = is_catchable_by_javascript(exception);
ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
@@ -1118,6 +1132,14 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
stack_trace_for_uncaught_exceptions_options_);
}
}
+ // Stringify custom error objects for the message object.
+ if (exception_handle->IsJSObject() && !IsErrorObject(exception_handle)) {
+ bool failed = false;
+ exception_handle = Execution::ToString(exception_handle, &failed);
+ if (failed) {
+ exception_handle = factory()->LookupAsciiSymbol("exception");
+ }
+ }
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
"uncaught_exception",
location,
@@ -1138,8 +1160,18 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// to the console for easier debugging.
int line_number = GetScriptLineNumberSafe(location->script(),
location->start_pos());
- OS::PrintError("Extension or internal compilation error at line %d.\n",
- line_number);
+ if (exception->IsString()) {
+ OS::PrintError(
+ "Extension or internal compilation error: %s in %s at line %d.\n",
+ *String::cast(exception)->ToCString(),
+ *String::cast(location->script()->name())->ToCString(),
+ line_number + 1);
+ } else {
+ OS::PrintError(
+ "Extension or internal compilation error in %s at line %d.\n",
+ *String::cast(location->script()->name())->ToCString(),
+ line_number + 1);
+ }
}
}
@@ -1202,7 +1234,7 @@ void Isolate::ReportPendingMessages() {
PropagatePendingExceptionToExternalTryCatch();
// If the pending exception is OutOfMemoryException set out_of_memory in
- // the global context. Note: We have to mark the global context here
+ // the native context. Note: We have to mark the native context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
HandleScope scope;
@@ -1312,20 +1344,26 @@ bool Isolate::is_out_of_memory() {
}
+Handle<Context> Isolate::native_context() {
+ GlobalObject* global = thread_local_top()->context_->global_object();
+ return Handle<Context>(global->native_context());
+}
+
+
Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global();
+ GlobalObject* global = thread_local_top()->context_->global_object();
return Handle<Context>(global->global_context());
}
-Handle<Context> Isolate::GetCallingGlobalContext() {
+Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it;
#ifdef ENABLE_DEBUGGER_SUPPORT
if (debug_->InDebugger()) {
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- if (context->global_context() == *debug_->debug_context()) {
+ if (context->native_context() == *debug_->debug_context()) {
it.Advance();
} else {
break;
@@ -1336,7 +1374,7 @@ Handle<Context> Isolate::GetCallingGlobalContext() {
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- return Handle<Context>(context->global_context());
+ return Handle<Context>(context->native_context());
}
@@ -1467,6 +1505,7 @@ Isolate::Isolate()
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
+ runtime_zone_(this),
in_use_list_(0),
free_list_(0),
preallocated_storage_preallocated_(false),
@@ -1480,14 +1519,15 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
- context_exit_happened_(false) {
+ context_exit_happened_(false),
+ deferred_handles_head_(NULL),
+ optimizing_compiler_thread_(this) {
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
heap_.isolate_ = this;
- zone_.isolate_ = this;
stack_guard_.isolate_ = this;
// ThreadManager is initialized early to support locking an isolate
@@ -1544,6 +1584,11 @@ void Isolate::TearDown() {
thread_data_table_->RemoveAllThreads(this);
}
+ if (serialize_partial_snapshot_cache_ != NULL) {
+ delete[] serialize_partial_snapshot_cache_;
+ serialize_partial_snapshot_cache_ = NULL;
+ }
+
if (!IsDefaultIsolate()) {
delete this;
}
@@ -1557,6 +1602,8 @@ void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
// We must stop the logger before we tear down other components.
@@ -1592,6 +1639,26 @@ void Isolate::Deinit() {
}
+void Isolate::PushToPartialSnapshotCache(Object* obj) {
+ int length = serialize_partial_snapshot_cache_length();
+ int capacity = serialize_partial_snapshot_cache_capacity();
+
+ if (length >= capacity) {
+ int new_capacity = static_cast<int>((capacity + 10) * 1.2);
+ Object** new_array = new Object*[new_capacity];
+ for (int i = 0; i < length; i++) {
+ new_array[i] = serialize_partial_snapshot_cache()[i];
+ }
+ if (capacity != 0) delete[] serialize_partial_snapshot_cache();
+ set_serialize_partial_snapshot_cache(new_array);
+ set_serialize_partial_snapshot_cache_capacity(new_capacity);
+ }
+
+ serialize_partial_snapshot_cache()[length] = obj;
+ set_serialize_partial_snapshot_cache_length(length + 1);
+}
+
+
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
Thread::SetThreadLocal(isolate_key_, isolate);
@@ -1603,7 +1670,7 @@ Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
// Has to be called while counters_ are still alive.
- zone_.DeleteKeptSegment();
+ runtime_zone_.DeleteKeptSegment();
delete[] assembler_spare_buffer_;
assembler_spare_buffer_ = NULL;
@@ -1740,10 +1807,8 @@ bool Isolate::Init(Deserializer* des) {
ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
-#ifdef DEBUG
// The initialization process does not handle memory exhaustion.
DisallowAllocationFailure disallow_allocation_failure;
-#endif
InitializeLoggingAndCounters();
@@ -1775,7 +1840,7 @@ bool Isolate::Init(Deserializer* des) {
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this);
+ stub_cache_ = new StubCache(this, runtime_zone());
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
@@ -1809,6 +1874,11 @@ bool Isolate::Init(Deserializer* des) {
return false;
}
+ if (create_heap_objects) {
+ // Terminate the cache array with the sentinel so we can iterate.
+ PushToPartialSnapshotCache(heap_.undefined_value());
+ }
+
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
@@ -1835,7 +1905,7 @@ bool Isolate::Init(Deserializer* des) {
#endif
// If we are deserializing, read the state into the now-empty heap.
- if (des != NULL) {
+ if (!create_heap_objects) {
des->Deserialize();
}
stub_cache_->Initialize();
@@ -1850,7 +1920,7 @@ bool Isolate::Init(Deserializer* des) {
heap_.SetStackLimits();
// Quiet the heap NaN if needed on target platform.
- if (des != NULL) Assembler::QuietNaN(heap_.nan_value());
+ if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
@@ -1858,7 +1928,8 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
- if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
+ if (!create_heap_objects &&
+ (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
HandleScope scope;
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
@@ -1873,6 +1944,7 @@ bool Isolate::Init(Deserializer* des) {
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+ if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}
@@ -1956,6 +2028,36 @@ void Isolate::Exit() {
}
+void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
+ deferred->next_ = deferred_handles_head_;
+ if (deferred_handles_head_ != NULL) {
+ deferred_handles_head_->previous_ = deferred;
+ }
+ deferred_handles_head_ = deferred;
+}
+
+
+void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
+#ifdef DEBUG
+ // In debug mode assert that the linked list is well-formed.
+ DeferredHandles* deferred_iterator = deferred;
+ while (deferred_iterator->previous_ != NULL) {
+ deferred_iterator = deferred_iterator->previous_;
+ }
+ ASSERT(deferred_handles_head_ == deferred_iterator);
+#endif
+ if (deferred_handles_head_ == deferred) {
+ deferred_handles_head_ = deferred_handles_head_->next_;
+ }
+ if (deferred->next_ != NULL) {
+ deferred->next_->previous_ = deferred->previous_;
+ }
+ if (deferred->previous_ != NULL) {
+ deferred->previous_->next_ = deferred->next_;
+ }
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/src/3rdparty/v8/src/isolate.h b/src/3rdparty/v8/src/isolate.h
index a865d99..889a3e1 100644
--- a/src/3rdparty/v8/src/isolate.h
+++ b/src/3rdparty/v8/src/isolate.h
@@ -41,6 +41,7 @@
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
+#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
@@ -310,19 +311,20 @@ class ThreadLocalTop BASE_EMBEDDED {
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
- V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
- V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
+ V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
+typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
V(int, serialize_partial_snapshot_cache_length, 0) \
+ V(int, serialize_partial_snapshot_cache_capacity, 0) \
+ V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
@@ -330,7 +332,7 @@ typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the global context. */ \
+ /* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
V(bool, always_allow_natives_syntax, false) \
@@ -355,6 +357,7 @@ typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
+ V(bool, observer_delivery_pending, false) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@@ -530,6 +533,11 @@ class Isolate {
thread_local_top_.save_context_ = save;
}
+ // Access to the map of "new Object()".
+ Map* empty_object_map() {
+ return context()->native_context()->object_function()->map();
+ }
+
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
@@ -581,6 +589,20 @@ class Isolate {
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
+
+ Address pending_message_obj_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
+ }
+
+ Address has_pending_message_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
+ }
+
+ Address pending_message_script_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.pending_message_script_);
+ }
+
MaybeObject* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_top_.scheduled_exception_;
@@ -599,6 +621,9 @@ class Isolate {
(exception != heap()->termination_exception());
}
+ // Serializer.
+ void PushToPartialSnapshotCache(Object* obj);
+
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
@@ -623,8 +648,8 @@ class Isolate {
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
+ Handle<GlobalObject> global_object() {
+ return Handle<GlobalObject>(context()->global_object());
}
// Returns the global proxy object of the current context.
@@ -686,6 +711,10 @@ class Isolate {
void PrintStack(StringStream* accumulator);
void PrintStack();
Handle<String> StackTraceString();
+ NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
+ Object* object,
+ Map* map,
+ unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
@@ -716,7 +745,7 @@ class Isolate {
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
- Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
+ Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
Failure* ThrowIllegalOperation();
@@ -748,12 +777,13 @@ class Isolate {
void IterateThread(ThreadVisitor* v, char* t);
- // Returns the current global context.
+ // Returns the current native and global context.
+ Handle<Context> native_context();
Handle<Context> global_context();
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- Handle<Context> GetCallingGlobalContext();
+ // Returns the native context of the calling JavaScript code. That
+ // is, the native context of the top-most JavaScript frame.
+ Handle<Context> GetCallingNativeContext();
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
@@ -787,12 +817,12 @@ class Isolate {
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
+ return Handle<type>(context()->native_context()->name()); \
}
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
+#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() {
@@ -844,7 +874,7 @@ class Isolate {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
}
- Zone* zone() { return &zone_; }
+ Zone* runtime_zone() { return &runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -970,10 +1000,7 @@ class Isolate {
Factory* factory() { return reinterpret_cast<Factory*>(this); }
- // SerializerDeserializer state.
- static const int kPartialSnapshotCacheCapacity = 1400;
-
- static const int kJSRegexpStaticOffsetsVectorSize = 50;
+ static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
return thread_local_top_.external_callback_;
@@ -1040,6 +1067,14 @@ class Isolate {
date_cache_ = date_cache;
}
+ void IterateDeferredHandles(ObjectVisitor* visitor);
+ void LinkDeferredHandles(DeferredHandles* deferred_handles);
+ void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+
+ OptimizingCompilerThread* optimizing_compiler_thread() {
+ return &optimizing_compiler_thread_;
+ }
+
private:
Isolate();
@@ -1190,7 +1225,7 @@ class Isolate {
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
- Zone zone_;
+ Zone runtime_zone_;
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
@@ -1263,8 +1298,13 @@ class Isolate {
#undef ISOLATE_FIELD_OFFSET
#endif
+ DeferredHandles* deferred_handles_head_;
+ OptimizingCompilerThread optimizing_compiler_thread_;
+
friend class ExecutionAccess;
+ friend class HandleScopeImplementer;
friend class IsolateInitializer;
+ friend class OptimizingCompilerThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
@@ -1402,19 +1442,18 @@ class PostponeInterruptsScope BASE_EMBEDDED {
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
-#define ZONE (v8::internal::Isolate::Current()->zone())
#define LOGGER (v8::internal::Isolate::Current()->logger())
-// Tells whether the global context is marked with out of memory.
+// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
- return global_context()->out_of_memory()->IsTrue();
+ return native_context()->out_of_memory()->IsTrue();
}
-// Mark the global context with out of memory.
+// Mark the native context with out of memory.
inline void Context::mark_out_of_memory() {
- global_context()->set_out_of_memory(HEAP->true_value());
+ native_context()->set_out_of_memory(HEAP->true_value());
}
diff --git a/src/3rdparty/v8/src/json-parser.h b/src/3rdparty/v8/src/json-parser.h
index d22cd0d..6f8c715 100644
--- a/src/3rdparty/v8/src/json-parser.h
+++ b/src/3rdparty/v8/src/json-parser.h
@@ -43,15 +43,15 @@ namespace internal {
template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
- static Handle<Object> Parse(Handle<String> source) {
- return JsonParser().ParseJson(source);
+ static Handle<Object> Parse(Handle<String> source, Zone* zone) {
+ return JsonParser().ParseJson(source, zone);
}
static const int kEndOfString = -1;
private:
// Parse a string containing a single JSON value.
- Handle<Object> ParseJson(Handle<String> source);
+ Handle<Object> ParseJson(Handle<String> source, Zone* zone);
inline void Advance() {
position_++;
@@ -71,11 +71,11 @@ class JsonParser BASE_EMBEDDED {
inline void AdvanceSkipWhitespace() {
do {
Advance();
- } while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ');
+ } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
}
inline void SkipWhitespace() {
- while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ') {
+ while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
Advance();
}
}
@@ -149,8 +149,12 @@ class JsonParser BASE_EMBEDDED {
}
inline Isolate* isolate() { return isolate_; }
+ inline Factory* factory() { return factory_; }
+ inline Handle<JSFunction> object_constructor() { return object_constructor_; }
+ inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
+ static const int kPretenureThreshold = 100 * 1024;
private:
@@ -158,17 +162,27 @@ class JsonParser BASE_EMBEDDED {
int source_length_;
Handle<SeqAsciiString> seq_source_;
+ PretenureFlag pretenure_;
Isolate* isolate_;
+ Factory* factory_;
+ Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
+ Zone* zone_;
};
template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
+Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
+ Zone* zone) {
isolate_ = source->map()->GetHeap()->isolate();
+ factory_ = isolate_->factory();
+ object_constructor_ = Handle<JSFunction>(
+ isolate()->native_context()->object_function(), isolate());
+ zone_ = zone;
FlattenString(source);
source_ = source;
source_length_ = source_->length();
+ pretenure_ = (source_length_ >= kPretenureThreshold) ? TENURED : NOT_TENURED;
// Optimized fast case where we only have ASCII characters.
if (seq_ascii) {
@@ -181,10 +195,12 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
if (result.is_null() || c0_ != kEndOfString) {
- // Parse failed. Current character is the unexpected token.
+ // Some exception (for example stack overflow) is already pending.
+ if (isolate_->has_pending_exception()) return Handle<Object>::null();
+ // Parse failed. Current character is the unexpected token.
const char* message;
- Factory* factory = isolate()->factory();
+ Factory* factory = this->factory();
Handle<JSArray> array;
switch (c0_) {
@@ -233,87 +249,118 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
// Parse any JSON value.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
- switch (c0_) {
- case '"':
- return ParseJsonString();
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- return ParseJsonNumber();
- case 'f':
- if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->false_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case 't':
- if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
- AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->true_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case 'n':
- if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 'l') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->null_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case '{':
- return ParseJsonObject();
- case '[':
- return ParseJsonArray();
- default:
- return ReportUnexpectedCharacter();
+ StackLimitCheck stack_check(isolate_);
+ if (stack_check.HasOverflowed()) {
+ isolate_->StackOverflow();
+ return Handle<Object>::null();
+ }
+
+ if (c0_ == '"') return ParseJsonString();
+ if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
+ if (c0_ == '{') return ParseJsonObject();
+ if (c0_ == '[') return ParseJsonArray();
+ if (c0_ == 'f') {
+ if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->false_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ if (c0_ == 't') {
+ if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+ AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->true_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ if (c0_ == 'n') {
+ if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 'l') {
+ AdvanceSkipWhitespace();
+ return factory()->null_value();
+ }
+ return ReportUnexpectedCharacter();
}
+ return ReportUnexpectedCharacter();
}
// Parse a JSON object. Position must be right at '{'.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
- Handle<JSFunction> object_constructor(
- isolate()->global_context()->object_function());
+ Handle<Object> prototype;
Handle<JSObject> json_object =
- isolate()->factory()->NewJSObject(object_constructor);
+ factory()->NewJSObject(object_constructor(), pretenure_);
ASSERT_EQ(c0_, '{');
AdvanceSkipWhitespace();
if (c0_ != '}') {
do {
if (c0_ != '"') return ReportUnexpectedCharacter();
+
+ int start_position = position_;
+ Advance();
+
+ uint32_t index = 0;
+ if (c0_ >= '0' && c0_ <= '9') {
+ // Maybe an array index; try to parse it.
+ if (c0_ == '0') {
+ // With a leading zero, the string has to be exactly "0" to be an index.
+ Advance();
+ } else {
+ do {
+ int d = c0_ - '0';
+ if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
+ index = (index * 10) + d;
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+
+ if (c0_ == '"') {
+ // Successfully parsed index, parse and store element.
+ AdvanceSkipWhitespace();
+
+ if (c0_ != ':') return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+ continue;
+ }
+ // Not an index, fall back to the slow path.
+ }
+
+ position_ = start_position;
+#ifdef DEBUG
+ c0_ = '"';
+#endif
+
Handle<String> key = ParseJsonSymbol();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
AdvanceSkipWhitespace();
Handle<Object> value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
- } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
- SetPrototype(json_object, value);
+ if (key->Equals(isolate()->heap()->Proto_symbol())) {
+ prototype = value;
} else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ if (JSObject::TryTransitionToField(json_object, key)) {
+ int index = json_object->LastAddedFieldIndex();
+ json_object->FastPropertyAtPut(index, *value);
+ } else {
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
+ }
}
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
}
+ if (!prototype.is_null()) SetPrototype(json_object, prototype);
}
AdvanceSkipWhitespace();
return json_object;
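The guard in the array-index fast path above, index > 429496729U - ((d > 5) ? 1 : 0), is a 32-bit overflow check: 429496729 is UINT32_MAX / 10, so one more decimal digit can be appended only below that bound, with one extra unit of slack surrendered when the incoming digit exceeds 5. A sketch of the same guard as a hypothetical helper (not from the patch):

    #include <cstdint>

    // Returns false instead of overflowing when appending decimal digit d.
    bool AppendDigit(uint32_t* index, int d) {
      // 429496729 == UINT32_MAX / 10; with d > 5 even that base overflows.
      if (*index > 429496729U - ((d > 5) ? 1 : 0)) return false;
      *index = *index * 10 + static_cast<uint32_t>(d);
      return true;
    }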
@@ -322,8 +369,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
// Parse a JSON array. Position must be right at '['.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
- ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4);
+ ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
+ ZoneList<Handle<Object> > elements(4, zone());
ASSERT_EQ(c0_, '[');
AdvanceSkipWhitespace();
@@ -331,7 +378,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
do {
Handle<Object> element = ParseJsonValue();
if (element.is_null()) return ReportUnexpectedCharacter();
- elements.Add(element);
+ elements.Add(element, zone());
} while (MatchSkipWhiteSpace(','));
if (c0_ != ']') {
return ReportUnexpectedCharacter();
@@ -340,11 +387,12 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
AdvanceSkipWhitespace();
// Allocate a fixed array with all the elements.
Handle<FixedArray> fast_elements =
- isolate()->factory()->NewFixedArray(elements.length());
+ factory()->NewFixedArray(elements.length(), pretenure_);
for (int i = 0, n = elements.length(); i < n; i++) {
fast_elements->set(i, *elements[i]);
}
- return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+ return factory()->NewJSArrayWithElements(
+ fast_elements, FAST_ELEMENTS, pretenure_);
}
@@ -411,7 +459,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
buffer.Dispose();
}
SkipWhitespace();
- return isolate()->factory()->NewNumber(number);
+ return factory()->NewNumber(number, pretenure_);
}
@@ -429,16 +477,22 @@ inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
}
template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory, int length);
+inline Handle<StringType> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure);
template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
- return factory->NewRawTwoByteString(length, NOT_TENURED);
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawTwoByteString(length, pretenure);
}
template <>
-inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
- return factory->NewRawAsciiString(length, NOT_TENURED);
+inline Handle<SeqAsciiString> NewRawString(Factory* factory,
+ int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawAsciiString(length, pretenure);
}
@@ -452,8 +506,8 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
int count = end - start;
int max_length = count + source_length_ - position_;
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_str = NewRawString<StringType>(isolate()->factory(),
- length);
+ Handle<StringType> seq_str =
+ NewRawString<StringType>(factory(), length, pretenure_);
// Copy prefix into seq_str.
SinkChar* dest = seq_str->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
@@ -557,8 +611,58 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
Advance();
if (c0_ == '"') {
AdvanceSkipWhitespace();
- return Handle<String>(isolate()->heap()->empty_string());
+ return factory()->empty_string();
+ }
+
+ if (seq_ascii && is_symbol) {
+ // Fast path for existing symbols. If the string being parsed is not
+ // a known symbol, contains backslashes, or unexpectedly reaches the end
+ // of the string, return with an empty handle.
+ uint32_t running_hash = isolate()->heap()->HashSeed();
+ int position = position_;
+ uc32 c0 = c0_;
+ do {
+ if (c0 == '\\') {
+ c0_ = c0;
+ int beg_pos = position_;
+ position_ = position;
+ return SlowScanJsonString<SeqAsciiString, char>(source_,
+ beg_pos,
+ position_);
+ }
+ if (c0 < 0x20) return Handle<String>::null();
+ running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+ position++;
+ if (position >= source_length_) return Handle<String>::null();
+ c0 = seq_source_->SeqAsciiStringGet(position);
+ } while (c0 != '"');
+ int length = position - position_;
+ uint32_t hash = (length <= String::kMaxHashCalcLength)
+ ? StringHasher::GetHashCore(running_hash) : length;
+ Vector<const char> string_vector(
+ seq_source_->GetChars() + position_, length);
+ SymbolTable* symbol_table = isolate()->heap()->symbol_table();
+ uint32_t capacity = symbol_table->Capacity();
+ uint32_t entry = SymbolTable::FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ while (true) {
+ Object* element = symbol_table->KeyAt(entry);
+ if (element == isolate()->heap()->undefined_value()) {
+ // Lookup failure.
+ break;
+ }
+ if (element != isolate()->heap()->the_hole_value() &&
+ String::cast(element)->IsAsciiEqualTo(string_vector)) {
+ // Lookup success, update the current position.
+ position_ = position;
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return Handle<String>(String::cast(element), isolate());
+ }
+ entry = SymbolTable::NextProbe(entry, count++, capacity);
+ }
}
+
int beg_pos = position_;
// Fast case for ASCII only without escape characters.
do {
@@ -581,11 +685,11 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
int length = position_ - beg_pos;
Handle<String> result;
if (seq_ascii && is_symbol) {
- result = isolate()->factory()->LookupAsciiSymbol(seq_source_,
- beg_pos,
- length);
+ result = factory()->LookupAsciiSymbol(seq_source_,
+ beg_pos,
+ length);
} else {
- result = isolate()->factory()->NewRawAsciiString(length);
+ result = factory()->NewRawAsciiString(length, pretenure_);
char* dest = SeqAsciiString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
}
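The symbol fast path above probes the symbol table directly while hashing characters on the fly, so already-interned keys are found without allocating a temporary string. The probe helpers it calls follow V8's power-of-two open addressing; a minimal sketch of the assumed scheme (signatures inferred from the call sites, treat as an assumption):

    #include <cstdint>

    uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
      return hash & (capacity - 1);  // capacity is a power of two
    }

    uint32_t NextProbe(uint32_t last, uint32_t count, uint32_t capacity) {
      // Triangular-number stepping visits every slot once per cycle.
      return (last + count) & (capacity - 1);
    }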
diff --git a/src/3rdparty/v8/src/json-stringifier.h b/src/3rdparty/v8/src/json-stringifier.h
new file mode 100644
index 0000000..cdb724f
--- /dev/null
+++ b/src/3rdparty/v8/src/json-stringifier.h
@@ -0,0 +1,800 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSON_STRINGIFIER_H_
+#define V8_JSON_STRINGIFIER_H_
+
+#include "v8.h"
+#include "v8utils.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+class BasicJsonStringifier BASE_EMBEDDED {
+ public:
+ explicit BasicJsonStringifier(Isolate* isolate);
+
+ MaybeObject* Stringify(Handle<Object> object);
+
+ private:
+ static const int kInitialPartLength = 32;
+ static const int kMaxPartLength = 16 * 1024;
+ static const int kPartLengthGrowthFactor = 2;
+
+ enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
+
+ void Extend();
+
+ void ChangeEncoding();
+
+ void ShrinkCurrentPart();
+
+ template <bool is_ascii, typename Char>
+ INLINE(void Append_(Char c));
+
+ template <bool is_ascii, typename Char>
+ INLINE(void Append_(const Char* chars));
+
+ INLINE(void Append(char c)) {
+ if (is_ascii_) {
+ Append_<true>(c);
+ } else {
+ Append_<false>(c);
+ }
+ }
+
+ INLINE(void Append(const char* chars)) {
+ if (is_ascii_) {
+ Append_<true>(chars);
+ } else {
+ Append_<false>(chars);
+ }
+ }
+
+ Handle<Object> GetProperty(Handle<JSObject> object,
+ Handle<String> key);
+
+ Handle<Object> ApplyToJsonFunction(Handle<Object> object,
+ Handle<Object> key);
+
+ Result SerializeGeneric(Handle<Object> object,
+ Handle<Object> key,
+ bool deferred_comma,
+ bool deferred_key);
+
+ // Entry point to serialize the object.
+ INLINE(Result SerializeObject(Handle<Object> obj)) {
+ return Serialize_<false>(obj, false, factory_->empty_string());
+ }
+
+ // Serialize an array element.
+ // The index may serve as argument for the toJSON function.
+ INLINE(Result SerializeElement(Handle<Object> object, int i)) {
+ return Serialize_<false>(object, false, Handle<Object>(Smi::FromInt(i)));
+ }
+
+ // Serialize an object property.
+ // The key may or may not be serialized depending on the property.
+ // The key may also serve as argument for the toJSON function.
+ INLINE(Result SerializeProperty(Handle<Object> object,
+ bool deferred_comma,
+ Handle<String> deferred_key)) {
+ ASSERT(!deferred_key.is_null());
+ return Serialize_<true>(object, deferred_comma, deferred_key);
+ }
+
+ template <bool deferred_string_key>
+ Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
+
+ void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
+ if (deferred_comma) Append(',');
+ SerializeString(Handle<String>::cast(deferred_key));
+ Append(':');
+ }
+
+ Result SerializeSmi(Smi* object);
+
+ Result SerializeDouble(double number);
+ INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) {
+ return SerializeDouble(object->value());
+ }
+
+ Result SerializeJSValue(Handle<JSValue> object);
+
+ INLINE(Result SerializeJSArray(Handle<JSArray> object));
+ INLINE(Result SerializeJSObject(Handle<JSObject> object));
+
+ Result SerializeJSArraySlow(Handle<JSArray> object, int length);
+
+ void SerializeString(Handle<String> object);
+
+ template <typename SrcChar, typename DestChar>
+ INLINE(void SerializeStringUnchecked_(const SrcChar* src,
+ DestChar* dest,
+ int length));
+
+ template <bool is_ascii, typename Char>
+ INLINE(void SerializeString_(Vector<const Char> vector,
+ Handle<String> string));
+
+ template <typename Char>
+ INLINE(bool DoNotEscape(Char c));
+
+ template <typename Char>
+ INLINE(Vector<const Char> GetCharVector(Handle<String> string));
+
+ Result StackPush(Handle<Object> object);
+ void StackPop();
+
+ INLINE(Handle<String> accumulator()) {
+ return Handle<String>(String::cast(accumulator_store_->value()), isolate_);
+ }
+
+ INLINE(void set_accumulator(Handle<String> string)) {
+ return accumulator_store_->set_value(*string);
+ }
+
+ Isolate* isolate_;
+ Factory* factory_;
+ // We use a value wrapper for the string accumulator to keep the
+ // (indirect) handle to it in the outermost handle scope.
+ Handle<JSValue> accumulator_store_;
+ Handle<String> current_part_;
+ Handle<String> tojson_symbol_;
+ Handle<JSArray> stack_;
+ int current_index_;
+ int part_length_;
+ bool is_ascii_;
+
+ static const int kJsonEscapeTableEntrySize = 8;
+ static const char* const JsonEscapeTable;
+};
+
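The Result enum in the class above is deliberately ordered so that every failure kind compares greater than or equal to EXCEPTION, which is what the `if (result >= EXCEPTION) return result;` checks in SerializeJSObject below rely on. A minimal restatement of the pattern (demo only, not part of the patch):

    enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
    inline bool IsFailure(Result r) { return r >= EXCEPTION; }  // any of the last three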
+
+// Translation table to escape ASCII characters.
+// Table entries start at a multiple of 8 and are null-terminated.
+const char* const BasicJsonStringifier::JsonEscapeTable =
+ "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
+ "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
+ "\\b\0 \\t\0 \\n\0 \\u000b\0 "
+ "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
+ "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
+ "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
+ "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
+ "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
+ " \0 !\0 \\\"\0 #\0 "
+ "$\0 %\0 &\0 '\0 "
+ "(\0 )\0 *\0 +\0 "
+ ",\0 -\0 .\0 /\0 "
+ "0\0 1\0 2\0 3\0 "
+ "4\0 5\0 6\0 7\0 "
+ "8\0 9\0 :\0 ;\0 "
+ "<\0 =\0 >\0 ?\0 "
+ "@\0 A\0 B\0 C\0 "
+ "D\0 E\0 F\0 G\0 "
+ "H\0 I\0 J\0 K\0 "
+ "L\0 M\0 N\0 O\0 "
+ "P\0 Q\0 R\0 S\0 "
+ "T\0 U\0 V\0 W\0 "
+ "X\0 Y\0 Z\0 [\0 "
+ "\\\\\0 ]\0 ^\0 _\0 "
+ "`\0 a\0 b\0 c\0 "
+ "d\0 e\0 f\0 g\0 "
+ "h\0 i\0 j\0 k\0 "
+ "l\0 m\0 n\0 o\0 "
+ "p\0 q\0 r\0 s\0 "
+ "t\0 u\0 v\0 w\0 "
+ "x\0 y\0 z\0 {\0 "
+ "|\0 }\0 ~\0 \177\0 ";
+
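Each entry in the table above occupies kJsonEscapeTableEntrySize (8) bytes, holding the escape sequence for one ASCII code followed by NUL padding, so the escape for character c starts at offset c * 8. A small lookup demo (illustrative only):

    #include <cstdio>

    void PrintEscape(const char* table, unsigned char c) {
      // '\n' is code 10, so &table[80] points at the two characters "\n".
      std::printf("%s", &table[c * 8]);
    }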
+
+BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
+ : isolate_(isolate), current_index_(0), is_ascii_(true) {
+ factory_ = isolate_->factory();
+ accumulator_store_ = Handle<JSValue>::cast(
+ factory_->ToObject(factory_->empty_string()));
+ part_length_ = kInitialPartLength;
+ current_part_ = factory_->NewRawAsciiString(kInitialPartLength);
+ tojson_symbol_ = factory_->LookupAsciiSymbol("toJSON");
+ stack_ = factory_->NewJSArray(8);
+}
+
+
+MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
+ switch (SerializeObject(object)) {
+ case UNCHANGED:
+ return isolate_->heap()->undefined_value();
+ case SUCCESS:
+ ShrinkCurrentPart();
+ return *factory_->NewConsString(accumulator(), current_part_);
+ case CIRCULAR:
+ return isolate_->Throw(*factory_->NewTypeError(
+ "circular_structure", HandleVector<Object>(NULL, 0)));
+ case STACK_OVERFLOW:
+ return isolate_->StackOverflow();
+ default:
+ return Failure::Exception();
+ }
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::Append_(Char c) {
+ if (is_ascii) {
+ SeqAsciiString::cast(*current_part_)->SeqAsciiStringSet(
+ current_index_++, c);
+ } else {
+ SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
+ current_index_++, c);
+ }
+ if (current_index_ == part_length_) Extend();
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::Append_(const Char* chars) {
+ for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
+}
+
+
+Handle<Object> BasicJsonStringifier::GetProperty(Handle<JSObject> object,
+ Handle<String> key) {
+ LookupResult lookup(isolate_);
+ object->LocalLookupRealNamedProperty(*key, &lookup);
+ if (!lookup.IsProperty()) return factory_->undefined_value();
+ switch (lookup.type()) {
+ case NORMAL: {
+ Object* value = lookup.holder()->GetNormalizedProperty(&lookup);
+ ASSERT(!value->IsTheHole());
+ return Handle<Object>(value, isolate_);
+ }
+ case FIELD: {
+ Object* value = lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+ ASSERT(!value->IsTheHole());
+ return Handle<Object>(value, isolate_);
+ }
+ case CONSTANT_FUNCTION:
+ return Handle<Object>(lookup.GetConstantFunction(), isolate_);
+ default: {
+ PropertyAttributes attr;
+ return Object::GetProperty(object, object, &lookup, key, &attr);
+ }
+ }
+ return Handle<Object>::null();
+}
+
+
+Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
+ Handle<Object> object, Handle<Object> key) {
+ LookupResult lookup(isolate_);
+ JSObject::cast(*object)->LookupRealNamedProperty(*tojson_symbol_, &lookup);
+ if (!lookup.IsProperty()) return object;
+ PropertyAttributes attr;
+ Handle<Object> fun =
+ Object::GetProperty(object, object, &lookup, tojson_symbol_, &attr);
+ if (!fun->IsJSFunction()) return object;
+
+ // Call toJSON function.
+ if (key->IsSmi()) key = factory_->NumberToString(key);
+ Handle<Object> argv[] = { key };
+ bool has_exception = false;
+ HandleScope scope(isolate_);
+ object = Execution::Call(fun, object, 1, argv, &has_exception);
+ // Return empty handle to signal an exception.
+ if (has_exception) return Handle<Object>::null();
+ return scope.CloseAndEscape(object);
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
+ Handle<Object> object) {
+ StackLimitCheck check(isolate_);
+ if (check.HasOverflowed()) return STACK_OVERFLOW;
+
+ int length = Smi::cast(stack_->length())->value();
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ return CIRCULAR;
+ }
+ }
+ stack_->EnsureSize(length + 1);
+ FixedArray::cast(stack_->elements())->set(length, *object);
+ stack_->set_length(Smi::FromInt(length + 1));
+ return SUCCESS;
+}
+
+
+void BasicJsonStringifier::StackPop() {
+ int length = Smi::cast(stack_->length())->value();
+ stack_->set_length(Smi::FromInt(length - 1));
+}
+
+
+template <bool deferred_string_key>
+BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
+ Handle<Object> object, bool comma, Handle<Object> key) {
+ if (object->IsJSObject()) {
+ object = ApplyToJsonFunction(object, key);
+ if (object.is_null()) return EXCEPTION;
+ }
+
+ if (object->IsSmi()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeSmi(Smi::cast(*object));
+ }
+
+ switch (HeapObject::cast(*object)->map()->instance_type()) {
+ case HEAP_NUMBER_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
+ case ODDBALL_TYPE:
+ switch (Oddball::cast(*object)->kind()) {
+ case Oddball::kFalse:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ Append("false");
+ return SUCCESS;
+ case Oddball::kTrue:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ Append("true");
+ return SUCCESS;
+ case Oddball::kNull:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ Append("null");
+ return SUCCESS;
+ default:
+ return UNCHANGED;
+ }
+ case JS_ARRAY_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSArray(Handle<JSArray>::cast(object));
+ case JS_VALUE_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSValue(Handle<JSValue>::cast(object));
+ case JS_FUNCTION_TYPE:
+ return UNCHANGED;
+ default:
+ if (object->IsString()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ SerializeString(Handle<String>::cast(object));
+ return SUCCESS;
+ } else if (object->IsJSObject()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSObject(Handle<JSObject>::cast(object));
+ } else {
+ return SerializeGeneric(object, key, comma, deferred_string_key);
+ }
+ }
+}
+
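Serialize_ above threads a deferred_string_key template flag through the dispatch so that a property's key (and any leading comma) is written only once the value is known to produce output; values that come back UNCHANGED are skipped with no residue and no backtracking. A toy restatement of the idea (demo only, not V8 code):

    #include <string>

    // Returns true if the property was emitted; the key is written lazily.
    bool EmitProperty(std::string* out, bool comma, const std::string& key,
                      const std::string* maybe_value) {
      if (maybe_value == NULL) return false;  // value "unchanged": skip key too
      if (comma) out->append(",");
      out->append("\"").append(key).append("\":").append(*maybe_value);
      return true;
    }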
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
+ Handle<Object> object,
+ Handle<Object> key,
+ bool deferred_comma,
+ bool deferred_key) {
+ Handle<JSObject> builtins(isolate_->native_context()->builtins());
+ Handle<JSFunction> builtin = Handle<JSFunction>::cast(
+ v8::internal::GetProperty(builtins, "JSONSerializeAdapter"));
+
+ Handle<Object> argv[] = { key, object };
+ bool has_exception = false;
+ Handle<Object> result =
+ Execution::Call(builtin, object, 2, argv, &has_exception);
+ if (has_exception) return EXCEPTION;
+ if (result->IsUndefined()) return UNCHANGED;
+ if (deferred_key) {
+ if (key->IsSmi()) key = factory_->NumberToString(key);
+ SerializeDeferredKey(deferred_comma, key);
+ }
+
+ Handle<String> result_string = Handle<String>::cast(result);
+ // Shrink current part, attach it to the accumulator, also attach the result
+ // string to the accumulator, and allocate a new part.
+ ShrinkCurrentPart(); // Shrink.
+ part_length_ = kInitialPartLength; // Allocate conservatively.
+ Extend(); // Attach current part and allocate new part.
+ // Attach result string to the accumulator.
+ set_accumulator(factory_->NewConsString(accumulator(), result_string));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
+ Handle<JSValue> object) {
+ bool has_exception = false;
+ String* class_name = object->class_name();
+ if (class_name == isolate_->heap()->String_symbol()) {
+ Handle<Object> value = Execution::ToString(object, &has_exception);
+ if (has_exception) return EXCEPTION;
+ SerializeString(Handle<String>::cast(value));
+ } else if (class_name == isolate_->heap()->Number_symbol()) {
+ Handle<Object> value = Execution::ToNumber(object, &has_exception);
+ if (has_exception) return EXCEPTION;
+ if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
+ SerializeHeapNumber(Handle<HeapNumber>::cast(value));
+ } else {
+ ASSERT(class_name == isolate_->heap()->Boolean_symbol());
+ Object* value = JSValue::cast(*object)->value();
+ ASSERT(value->IsBoolean());
+ Append(value->IsTrue() ? "true" : "false");
+ }
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ Append(IntToCString(object->value(), buffer));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
+ double number) {
+ if (isinf(number) || isnan(number)) {
+ Append("null");
+ return SUCCESS;
+ }
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ Append(DoubleToCString(number, buffer));
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
+ Handle<JSArray> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ int length = Smi::cast(object->length())->value();
+ Append('[');
+ switch (object->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS: {
+ Handle<FixedArray> elements(
+ FixedArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ SerializeSmi(Smi::cast(elements->get(i)));
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ SerializeDouble(elements->get_scalar(i));
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ Handle<FixedArray> elements(
+ FixedArray::cast(object->elements()), isolate_);
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ Result result =
+ SerializeElement(Handle<Object>(elements->get(i), isolate_), i);
+ if (result == SUCCESS) continue;
+ if (result == UNCHANGED) {
+ Append("null");
+ } else {
+ return result;
+ }
+ }
+ break;
+ }
+ // TODO(yangguo): The FAST_HOLEY_* cases could be handled in a faster way.
+ // They resemble the non-holey cases except that a prototype chain lookup
+ // is necessary for holes.
+ default: {
+ Result result = SerializeJSArraySlow(object, length);
+ if (result != SUCCESS) return result;
+ break;
+ }
+ }
+ Append(']');
+ StackPop();
+ current_part_ = handle_scope.CloseAndEscape(current_part_);
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
+ Handle<JSArray> object, int length) {
+ for (int i = 0; i < length; i++) {
+ if (i > 0) Append(',');
+ Handle<Object> element = Object::GetElement(object, i);
+ if (element->IsUndefined()) {
+ Append("null");
+ } else {
+ Result result = SerializeElement(element, i);
+ if (result == SUCCESS) continue;
+ if (result == UNCHANGED) {
+ Append("null");
+ } else {
+ return result;
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+
+BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
+ Handle<JSObject> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ if (object->IsJSGlobalProxy()) {
+ object = Handle<JSObject>(
+ JSObject::cast(object->GetPrototype()), isolate_);
+ ASSERT(object->IsGlobalObject());
+ }
+
+ Append('{');
+ bool comma = false;
+
+ if (object->HasFastProperties() &&
+ !object->HasIndexedInterceptor() &&
+ !object->HasNamedInterceptor() &&
+ object->elements()->length() == 0) {
+ Handle<Map> map(object->map());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<String> key(map->instance_descriptors()->GetKey(i), isolate_);
+ PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ if (details.IsDontEnum() || details.IsDeleted()) continue;
+ Handle<Object> property;
+ if (details.type() == FIELD && *map == object->map()) {
+ property = Handle<Object>(
+ object->FastPropertyAt(
+ map->instance_descriptors()->GetFieldIndex(i)),
+ isolate_);
+ } else {
+ property = GetProperty(object, key);
+ if (property.is_null()) return EXCEPTION;
+ }
+ Result result = SerializeProperty(property, comma, key);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result >= EXCEPTION) return result;
+ }
+ } else {
+ bool has_exception = false;
+ Handle<FixedArray> contents =
+ GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception);
+ if (has_exception) return EXCEPTION;
+
+ for (int i = 0; i < contents->length(); i++) {
+ Object* key = contents->get(i);
+ Handle<String> key_handle;
+ Handle<Object> property;
+ if (key->IsString()) {
+ key_handle = Handle<String>(String::cast(key), isolate_);
+ property = GetProperty(object, key_handle);
+ } else {
+ ASSERT(key->IsNumber());
+ key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
+ uint32_t index;
+ if (key->IsSmi()) {
+ property = Object::GetElement(object, Smi::cast(key)->value());
+ } else if (key_handle->AsArrayIndex(&index)) {
+ property = Object::GetElement(object, index);
+ } else {
+ property = GetProperty(object, key_handle);
+ }
+ }
+ if (property.is_null()) return EXCEPTION;
+ Result result = SerializeProperty(property, comma, key_handle);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result >= EXCEPTION) return result;
+ }
+ }
+
+ Append('}');
+ StackPop();
+ current_part_ = handle_scope.CloseAndEscape(current_part_);
+ return SUCCESS;
+}
+
+
+void BasicJsonStringifier::ShrinkCurrentPart() {
+ ASSERT(current_index_ < part_length_);
+ if (current_index_ == 0) {
+ current_part_ = factory_->empty_string();
+ return;
+ }
+
+ int string_size, allocated_string_size;
+ if (is_ascii_) {
+ allocated_string_size = SeqAsciiString::SizeFor(part_length_);
+ string_size = SeqAsciiString::SizeFor(current_index_);
+ } else {
+ allocated_string_size = SeqTwoByteString::SizeFor(part_length_);
+ string_size = SeqTwoByteString::SizeFor(current_index_);
+ }
+
+ int delta = allocated_string_size - string_size;
+ current_part_->set_length(current_index_);
+
+ // String sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ Address end_of_string = current_part_->address() + string_size;
+ isolate_->heap()->CreateFillerObjectAt(end_of_string, delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*current_part_))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(
+ current_part_->address(), -delta);
+ }
+}
+
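A worked example of the delta above, under the assumption of a 32-bit heap where a sequential ASCII string carries a 12-byte header and sizes round up to 4-byte pointer alignment: shrinking a 32-character part down to 5 used characters gives allocated_string_size = align(12 + 32) = 44 and string_size = align(12 + 5) = 20, so a 24-byte filler object covers the tail and, on an already-marked page, 24 live bytes are subtracted.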
+
+void BasicJsonStringifier::Extend() {
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
+ part_length_ *= kPartLengthGrowthFactor;
+ }
+ if (is_ascii_) {
+ current_part_ = factory_->NewRawAsciiString(part_length_);
+ } else {
+ current_part_ = factory_->NewRawTwoByteString(part_length_);
+ }
+ current_index_ = 0;
+}
+
+
+void BasicJsonStringifier::ChangeEncoding() {
+ ShrinkCurrentPart();
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ current_part_ = factory_->NewRawTwoByteString(part_length_);
+ current_index_ = 0;
+ is_ascii_ = false;
+}
+
+
+template <typename SrcChar, typename DestChar>
+void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
+ DestChar* dest,
+ int length) {
+ dest += current_index_;
+ DestChar* dest_start = dest;
+
+ // Assert that a uc16 character is not truncated down to 8 bits.
+ // The <uc16, char> version of this method must not be called.
+ ASSERT(sizeof(*dest) >= sizeof(*src));
+
+ for (int i = 0; i < length; i++) {
+ SrcChar c = src[i];
+ if (DoNotEscape(c)) {
+ *(dest++) = static_cast<DestChar>(c);
+ } else {
+ const char* chars = &JsonEscapeTable[c * kJsonEscapeTableEntrySize];
+ while (*chars != '\0') *(dest++) = *(chars++);
+ }
+ }
+
+ current_index_ += static_cast<int>(dest - dest_start);
+}
+
+
+template <bool is_ascii, typename Char>
+void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
+ Handle<String> string) {
+ int length = vector.length();
+ Append_<is_ascii, char>('"');
+ // We make a rough estimate to find out if the current string can be
+ // serialized without allocating a new string part. The worst case length of
+ // an escaped character is 6. Shifting the remaining string length right by 3
+ // is a more pessimistic estimate, but faster to calculate.
+
+ if (((part_length_ - current_index_) >> 3) > length) {
+ if (is_ascii) {
+ SerializeStringUnchecked_(
+ vector.start(),
+ SeqAsciiString::cast(*current_part_)->GetChars(),
+ length);
+ } else {
+ SerializeStringUnchecked_(
+ vector.start(),
+ SeqTwoByteString::cast(*current_part_)->GetChars(),
+ length);
+ }
+ } else {
+ String* string_location = *string;
+ for (int i = 0; i < length; i++) {
+ Char c = vector[i];
+ if (DoNotEscape(c)) {
+ Append_<is_ascii, Char>(c);
+ } else {
+ Append_<is_ascii, char>(
+ &JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+ }
+ // If GC moved the string, we need to refresh the vector.
+ if (*string != string_location) {
+ vector = GetCharVector<Char>(string);
+ string_location = *string;
+ }
+ }
+ }
+
+ Append_<is_ascii, char>('"');
+}
+
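The shift-by-3 test in SerializeString_ above is a conservative capacity check: (remaining >> 3) > length implies remaining > 8 * length, while the worst escaped character ("\u00xx") expands to only 6 output characters, so the unchecked writer can never overrun the current part. A tiny restatement of the inequality (demo only):

    #include <cassert>

    void CheckEstimate(int remaining, int length) {
      if ((remaining >> 3) > length) {
        assert(remaining > 8 * length);  // hence >= 6 * length, the worst case
      }
    }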
+
+template <>
+bool BasicJsonStringifier::DoNotEscape(char c) {
+ return c >= '#' && c <= '~' && c != '\\';
+}
+
+
+template <>
+bool BasicJsonStringifier::DoNotEscape(uc16 c) {
+ return (c >= 0x80) || (c >= '#' && c <= '~' && c != '\\');
+}
+
+
+template <>
+Vector<const char> BasicJsonStringifier::GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsAscii());
+ return flat.ToAsciiVector();
+}
+
+
+template <>
+Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsTwoByte());
+ return flat.ToUC16Vector();
+}
+
+
+void BasicJsonStringifier::SerializeString(Handle<String> object) {
+ FlattenString(object);
+ String::FlatContent flat = object->GetFlatContent();
+ if (is_ascii_) {
+ if (flat.IsAscii()) {
+ SerializeString_<true, char>(flat.ToAsciiVector(), object);
+ } else {
+ ChangeEncoding();
+ SerializeString(object);
+ }
+ } else {
+ if (flat.IsAscii()) {
+ SerializeString_<false, char>(flat.ToAsciiVector(), object);
+ } else {
+ SerializeString_<false, uc16>(flat.ToUC16Vector(), object);
+ }
+ }
+}
+
+} } // namespace v8::internal
+
+#endif // V8_JSON_STRINGIFIER_H_
diff --git a/src/3rdparty/v8/src/json.js b/src/3rdparty/v8/src/json.js
index ccef445..9ab1a31 100644
--- a/src/3rdparty/v8/src/json.js
+++ b/src/3rdparty/v8/src/json.js
@@ -178,140 +178,9 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
}
-function BasicSerializeArray(value, stack, builder) {
- var len = value.length;
- if (len == 0) {
- builder.push("[]");
- return;
- }
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("[");
- var val = value[0];
- if (IS_STRING(val)) {
- // First entry is a string. Remaining entries are likely to be strings too.
- var array_string = %QuoteJSONStringArray(value);
- if (!IS_UNDEFINED(array_string)) {
- // array_string also includes bracket characters so we are done.
- builder[builder.length - 1] = array_string;
- stack.pop();
- return;
- } else {
- builder.push(%QuoteJSONString(val));
- for (var i = 1; i < len; i++) {
- val = value[i];
- if (IS_STRING(val)) {
- builder.push(%QuoteJSONStringComma(val));
- } else {
- builder.push(",");
- var before = builder.length;
- BasicJSONSerialize(i, val, stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- }
- } else if (IS_NUMBER(val)) {
- // First entry is a number. Remaining entries are likely to be numbers too.
- builder.push(JSON_NUMBER_TO_STRING(val));
- for (var i = 1; i < len; i++) {
- builder.push(",");
- val = value[i];
- if (IS_NUMBER(val)) {
- builder.push(JSON_NUMBER_TO_STRING(val));
- } else {
- var before = builder.length;
- BasicJSONSerialize(i, val, stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- } else {
- var before = builder.length;
- BasicJSONSerialize(0, val, stack, builder);
- if (before == builder.length) builder.push("null");
- for (var i = 1; i < len; i++) {
- builder.push(",");
- before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
- }
- }
- stack.pop();
- builder.push("]");
-}
-
-
-function BasicSerializeObject(value, stack, builder) {
- if (!%PushIfAbsent(stack, value)) {
- throw MakeTypeError('circular_structure', $Array());
- }
- builder.push("{");
- var first = true;
- for (var p in value) {
- if (%HasLocalProperty(value, p)) {
- if (!first) {
- builder.push(%QuoteJSONStringComma(p));
- } else {
- builder.push(%QuoteJSONString(p));
- }
- builder.push(":");
- var before = builder.length;
- BasicJSONSerialize(p, value[p], stack, builder);
- if (before == builder.length) {
- builder.pop();
- builder.pop();
- } else {
- first = false;
- }
- }
- }
- stack.pop();
- builder.push("}");
-}
-
-
-function BasicJSONSerialize(key, value, stack, builder) {
- if (IS_SPEC_OBJECT(value)) {
- var toJSON = value.toJSON;
- if (IS_SPEC_FUNCTION(toJSON)) {
- value = %_CallFunction(value, ToString(key), toJSON);
- }
- }
- if (IS_STRING(value)) {
- builder.push(value !== "" ? %QuoteJSONString(value) : '""');
- } else if (IS_NUMBER(value)) {
- builder.push(JSON_NUMBER_TO_STRING(value));
- } else if (IS_BOOLEAN(value)) {
- builder.push(value ? "true" : "false");
- } else if (IS_NULL(value)) {
- builder.push("null");
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
- // Value is a non-callable object.
- // Unwrap value if necessary
- if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
- builder.push(JSON_NUMBER_TO_STRING(value));
- } else if (IS_STRING_WRAPPER(value)) {
- builder.push(%QuoteJSONString(ToString(value)));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- builder.push(%_ValueOf(value) ? "true" : "false");
- } else if (IS_ARRAY(value)) {
- BasicSerializeArray(value, stack, builder);
- } else {
- BasicSerializeObject(value, stack, builder);
- }
- }
-}
-
-
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
- var builder = new InternalArray();
- BasicJSONSerialize('', value, new InternalArray(), builder);
- if (builder.length == 0) return;
- var result = %_FastAsciiArrayJoin(builder, "");
- if (!IS_UNDEFINED(result)) return result;
- return %StringBuilderConcat(builder, builder.length, "");
+ return %BasicJSONStringify(value);
}
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
@@ -337,6 +206,7 @@ function JSONStringify(value, replacer, space) {
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
+
function SetUpJSON() {
%CheckIsBootstrapping();
InstallFunctions($JSON, DONT_ENUM, $Array(
@@ -345,4 +215,12 @@ function SetUpJSON() {
));
}
+
+function JSONSerializeAdapter(key, object) {
+ var holder = {};
+ holder[key] = object;
+ // No need to pass the actual holder since there is no replacer function.
+ return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
+}
+
SetUpJSON();
diff --git a/src/3rdparty/v8/src/jsregexp.cc b/src/3rdparty/v8/src/jsregexp.cc
index 3455abc..e59170d 100644
--- a/src/3rdparty/v8/src/jsregexp.cc
+++ b/src/3rdparty/v8/src/jsregexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -167,7 +167,9 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flag_str) {
+ Handle<String> flag_str,
+ Zone* zone) {
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
Isolate* isolate = re->GetIsolate();
JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -181,12 +183,11 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
return re;
}
pattern = FlattenGetString(pattern);
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &parse_result)) {
+ &parse_result, zone)) {
// Throw an exception if we fail to parse the pattern.
ThrowRegExpException(re,
pattern,
@@ -277,11 +278,12 @@ static void SetAtomLastCapture(FixedArray* array,
}
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
- Isolate* isolate = re->GetIsolate();
+int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size) {
+ Isolate* isolate = regexp->GetIsolate();
ASSERT(0 <= index);
ASSERT(index <= subject->length());
@@ -289,15 +291,16 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
if (!subject->IsFlat()) FlattenString(subject);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
+ String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
ASSERT(needle->IsFlat());
+ ASSERT_LT(0, needle_len);
- if (needle_len != 0) {
- if (index + needle_len > subject->length()) {
- return isolate->factory()->null_value();
- }
+ if (index + needle_len > subject->length()) {
+ return RegExpImpl::RE_FAILURE;
+ }
+ for (int i = 0; i < output_size; i += 2) {
String::FlatContent needle_content = needle->GetFlatContent();
String::FlatContent subject_content = subject->GetFlatContent();
ASSERT(needle_content.IsFlat());
@@ -322,15 +325,36 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
subject_content.ToUC16Vector(),
needle_content.ToUC16Vector(),
index)));
- if (index == -1) return isolate->factory()->null_value();
+ if (index == -1) {
+ return i / 2; // Return number of matches.
+ } else {
+ output[i] = index;
+ output[i+1] = index + needle_len;
+ index += needle_len;
+ }
}
- ASSERT(last_match_info->HasFastElements());
+ return output_size / 2;
+}
- {
- NoHandleAllocation no_handles;
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- SetAtomLastCapture(array, *subject, index, index + needle_len);
- }
+
+Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
+ Handle<String> subject,
+ int index,
+ Handle<JSArray> last_match_info) {
+ Isolate* isolate = re->GetIsolate();
+
+ static const int kNumRegisters = 2;
+ STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
+ int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
+
+ int res = AtomExecRaw(re, subject, index, output_registers, kNumRegisters);
+
+ if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
+
+ ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
+ NoHandleAllocation no_handles;
+ FixedArray* array = FixedArray::cast(last_match_info->elements());
+ SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
}
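
The new AtomExecRaw establishes the output convention used throughout this
patch: matches are written as flat (start, end) pairs into an int32_t array
and the return value is the number of matches found, so a global matcher can
harvest a whole batch per call while AtomExec stays a thin one-shot wrapper.
A minimal sketch of that convention, assuming a non-empty needle (mirroring
ASSERT_LT(0, needle_len) above); AtomExecRawSketch and the use of strstr in
place of V8's SearchString are illustrative only:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Each match fills two slots: output[2*i] = start, output[2*i + 1] = end
// (exclusive).  Returns the number of matches written, at most
// output_size / 2.
int AtomExecRawSketch(const char* subject, const char* needle,
                      int index, int32_t* output, int output_size) {
  int needle_len = static_cast<int>(strlen(needle));
  for (int i = 0; i < output_size; i += 2) {
    const char* hit = strstr(subject + index, needle);
    if (hit == NULL) return i / 2;  // Number of matches found so far.
    index = static_cast<int>(hit - subject);
    output[i] = index;
    output[i + 1] = index + needle_len;
    index += needle_len;
  }
  return output_size / 2;  // Output array filled completely.
}

int main() {
  int32_t registers[8];
  int n = AtomExecRawSketch("abcabcabc", "abc", 0, registers, 8);
  for (int i = 0; i < n; i++) {
    printf("match %d: [%d, %d)\n", i,
           static_cast<int>(registers[2 * i]),
           static_cast<int>(registers[2 * i + 1]));
  }
  return 0;
}
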
@@ -385,7 +409,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
   // If there was a compilation error the last time, it is saved at the
   // saved code index.
@@ -416,8 +440,10 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
if (!pattern->IsFlat()) FlattenString(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
+ Zone* zone = isolate->runtime_zone();
if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
- &compile_data)) {
+ &compile_data,
+ zone)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
ThrowRegExpException(re,
@@ -429,10 +455,12 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
RegExpEngine::CompilationResult result =
RegExpEngine::Compile(&compile_data,
flags.is_ignore_case(),
+ flags.is_global(),
flags.is_multiline(),
pattern,
sample_subject,
- is_ascii);
+ is_ascii,
+ zone);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message =
@@ -506,7 +534,11 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
#ifdef V8_INTERPRETED_REGEXP
// Byte-code regexp needs space allocated for all its registers.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
+ // The result captures are copied to the start of the registers array
+ // if the match succeeds. This way those registers are not clobbered
+  // when we set the last match info from the last successful match.
+ return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
+ (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
#else // V8_INTERPRETED_REGEXP
// Native regexp only needs room to output captures. Registers are handled
// internally.
@@ -515,11 +547,11 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
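
Under V8_INTERPRETED_REGEXP the prepared register count now reserves a result
prefix of (captures + 1) * 2 slots in front of the interpreter's own
registers, one (start, end) pair per capture plus the implicit whole-match
pair. The arithmetic, as a hypothetical standalone helper with illustrative
parameter names:

// Interpreter scratch registers plus a stable prefix for published results.
int RequiredRegistersSketch(int interpreter_registers, int capture_count) {
  return interpreter_registers + (capture_count + 1) * 2;
}
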
-RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
- Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> output) {
+int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size) {
Isolate* isolate = regexp->GetIsolate();
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
@@ -531,15 +563,19 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
- ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
EnsureCompiledIrregexp(regexp, subject, is_ascii);
Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
+ // The stack is used to allocate registers for the compiled regexp code.
+ // This means that in case of failure, the output registers array is left
+ // untouched and contains the capture results from the previous successful
+ // match. We can use that to set the last match info lazily.
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
subject,
- output.start(),
- output.length(),
+ output,
+ output_size,
index,
isolate);
if (res != NativeRegExpMacroAssembler::RETRY) {
@@ -566,22 +602,29 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
return RE_EXCEPTION;
#else // V8_INTERPRETED_REGEXP
- ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
+ ASSERT(output_size >= IrregexpNumberOfRegisters(*irregexp));
// We must have done EnsureCompiledIrregexp, so we can get the number of
// registers.
- int* register_vector = output.start();
int number_of_capture_registers =
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+ int32_t* raw_output = &output[number_of_capture_registers];
+ // We do not touch the actual capture result registers until we know there
+ // has been a match so that we can use those capture results to set the
+ // last match info.
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- register_vector[i] = -1;
+ raw_output[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
IrregexpResult result = IrregexpInterpreter::Match(isolate,
byte_codes,
subject,
- register_vector,
+ raw_output,
index);
+ if (result == RE_SUCCESS) {
+ // Copy capture results to the start of the registers array.
+ memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ }
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
isolate->StackOverflow();
@@ -591,50 +634,44 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
}
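
IrregexpExecRaw puts that layout to work: in the interpreted path the
attempt runs entirely in the scratch area behind the result prefix, and the
captures are copied forward only on success, so after a failed attempt the
prefix still holds the previous successful captures and the last match info
can be set lazily. A compilable sketch of the publish step, with the
interpreter reduced to a hypothetical callback (RunOneAttemptSketch and
interpret are illustrative names, not V8's):

#include <cstdint>
#include <cstring>

// Layout modeled here:
//   output[0 .. capture_slots)   stable, published capture results
//   output[capture_slots .. )    scratch area for a single attempt
// A failed attempt leaves the published prefix untouched.
bool RunOneAttemptSketch(int32_t* output, int capture_count,
                         bool (*interpret)(int32_t* scratch)) {
  int capture_slots = (capture_count + 1) * 2;
  int32_t* scratch = output + capture_slots;
  for (int i = capture_slots - 1; i >= 0; i--) scratch[i] = -1;
  bool matched = interpret(scratch);  // stand-in for the bytecode interpreter
  if (matched) {
    // Publish: copy the attempt's capture registers over the stable prefix.
    memcpy(output, scratch, capture_slots * sizeof(int32_t));
  }
  return matched;
}

int main() {
  int32_t output[8] = {0};
  bool ok = RunOneAttemptSketch(output, 1, [](int32_t* scratch) {
    scratch[0] = 0; scratch[1] = 3;  // pretend the whole match was [0, 3)
    scratch[2] = 1; scratch[3] = 2;  // and capture 1 matched [1, 2)
    return true;
  });
  return ok ? 0 : 1;
}
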
-Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
+Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int previous_index,
Handle<JSArray> last_match_info) {
- Isolate* isolate = jsregexp->GetIsolate();
- ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
+ Isolate* isolate = regexp->GetIsolate();
+ ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
// Prepare space for the return values.
-#ifdef V8_INTERPRETED_REGEXP
-#ifdef DEBUG
+#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
- String* pattern = jsregexp->Pattern();
+ String* pattern = regexp->Pattern();
PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
}
#endif
-#endif
- int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
+ int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
ASSERT(isolate->has_pending_exception());
return Handle<Object>::null();
}
- OffsetsVector registers(required_registers, isolate);
+ int32_t* output_registers = NULL;
+ if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = NewArray<int32_t>(required_registers);
+ }
+ SmartArrayPointer<int32_t> auto_release(output_registers);
+ if (output_registers == NULL) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ }
- IrregexpResult res = RegExpImpl::IrregexpExecOnce(
- jsregexp, subject, previous_index, Vector<int>(registers.vector(),
- registers.length()));
+ int res = RegExpImpl::IrregexpExecRaw(
+ regexp, subject, previous_index, output_registers, required_registers);
if (res == RE_SUCCESS) {
- int capture_register_count =
- (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
- int* register_vector = registers.vector();
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- for (int i = 0; i < capture_register_count; i += 2) {
- SetCapture(array, i, register_vector[i]);
- SetCapture(array, i + 1, register_vector[i + 1]);
- }
- SetLastCaptureCount(array, capture_register_count);
- SetLastSubject(array, *subject);
- SetLastInput(array, *subject);
- return last_match_info;
+ int capture_count =
+ IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
+ return SetLastMatchInfo(
+ last_match_info, subject, capture_count, output_registers);
}
if (res == RE_EXCEPTION) {
ASSERT(isolate->has_pending_exception());
@@ -645,6 +682,146 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
}
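
The OffsetsVector in IrregexpExec has been replaced by an explicit choice
between the isolate's preallocated static offsets vector and a heap array
that SmartArrayPointer releases automatically. The same pattern in
miniature, with std::unique_ptr standing in for SmartArrayPointer and
kStaticSizeSketch for Isolate::kJSRegexpStaticOffsetsVectorSize (both
stand-ins, not V8 names):

#include <cstdint>
#include <memory>

const int kStaticSizeSketch = 50;
int32_t static_vector_sketch[kStaticSizeSketch];

// Small requests reuse the shared preallocated vector; large ones fall back
// to the heap, released automatically when the owner goes out of scope.
int32_t* SelectRegistersSketch(int required,
                               std::unique_ptr<int32_t[]>* owner) {
  if (required > kStaticSizeSketch) {
    owner->reset(new int32_t[required]);
    return owner->get();
  }
  return static_vector_sketch;  // shared buffer, never freed
}

int main() {
  std::unique_ptr<int32_t[]> owner;
  int32_t* small_buf = SelectRegistersSketch(8, &owner);    // static buffer
  int32_t* large_buf = SelectRegistersSketch(200, &owner);  // heap, auto-freed
  return (small_buf == static_vector_sketch &&
          large_buf != static_vector_sketch) ? 0 : 1;
}
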
+Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
+ Handle<String> subject,
+ int capture_count,
+ int32_t* match) {
+ int capture_register_count = (capture_count + 1) * 2;
+ last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+ AssertNoAllocation no_gc;
+ FixedArray* array = FixedArray::cast(last_match_info->elements());
+ if (match != NULL) {
+ for (int i = 0; i < capture_register_count; i += 2) {
+ SetCapture(array, i, match[i]);
+ SetCapture(array, i + 1, match[i + 1]);
+ }
+ }
+ SetLastCaptureCount(array, capture_register_count);
+ SetLastSubject(array, *subject);
+ SetLastInput(array, *subject);
+ return last_match_info;
+}
+
+
+RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ bool is_global,
+ Isolate* isolate)
+ : register_array_(NULL),
+ register_array_size_(0),
+ regexp_(regexp),
+ subject_(subject) {
+#ifdef V8_INTERPRETED_REGEXP
+ bool interpreted = true;
+#else
+ bool interpreted = false;
+#endif // V8_INTERPRETED_REGEXP
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ static const int kAtomRegistersPerMatch = 2;
+ registers_per_match_ = kAtomRegistersPerMatch;
+ // There is no distinction between interpreted and native for atom regexps.
+ interpreted = false;
+ } else {
+ registers_per_match_ = RegExpImpl::IrregexpPrepare(regexp_, subject_);
+ if (registers_per_match_ < 0) {
+ num_matches_ = -1; // Signal exception.
+ return;
+ }
+ }
+
+ if (is_global && !interpreted) {
+ register_array_size_ =
+ Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ max_matches_ = register_array_size_ / registers_per_match_;
+ } else {
+ // Global loop in interpreted regexp is not implemented. We choose
+ // the size of the offsets vector so that it can only store one match.
+ register_array_size_ = registers_per_match_;
+ max_matches_ = 1;
+ }
+
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ register_array_ = NewArray<int32_t>(register_array_size_);
+ } else {
+ register_array_ = isolate->jsregexp_static_offsets_vector();
+ }
+
+ // Set state so that fetching the results the first time triggers a call
+ // to the compiled regexp.
+ current_match_index_ = max_matches_ - 1;
+ num_matches_ = max_matches_;
+ ASSERT(registers_per_match_ >= 2); // Each match has at least one capture.
+ ASSERT_GE(register_array_size_, registers_per_match_);
+ int32_t* last_match =
+ &register_array_[current_match_index_ * registers_per_match_];
+ last_match[0] = -1;
+ last_match[1] = 0;
+}
+
+
+RegExpImpl::GlobalCache::~GlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+    // Fail if the last batch was not fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) last_end_index++;
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return NULL;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
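
GlobalCache is a pull-style iterator over all matches: the constructor
primes current_match_index_ and num_matches_ so the very first FetchNext()
lands in the refill branch, and the sentinel last match [-1, 0) both starts
the search at index 0 and cannot be mistaken for a zero-length match (start
!= end). A condensed, runnable sketch of the same batching scheme over a
plain substring search; BatchIteratorSketch and its members are illustrative
and a non-empty needle is assumed:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Matches are produced in batches into a flat register array; FetchNext()
// hands out one (start, end) pair at a time and refills when the batch
// runs dry.
class BatchIteratorSketch {
 public:
  BatchIteratorSketch(const char* subject, const char* needle)
      : subject_(subject), needle_(needle) {
    // Prime the state so the first FetchNext() triggers a real search:
    // pretend the previous batch was full and ended with a sentinel match
    // at [-1, 0), which makes the next search start at index 0.
    current_ = kMaxMatches - 1;
    num_matches_ = kMaxMatches;
    registers_[2 * current_] = -1;
    registers_[2 * current_ + 1] = 0;
  }

  const int32_t* FetchNext() {
    current_++;
    if (current_ < num_matches_)          // still inside the current batch
      return &registers_[2 * current_];
    if (num_matches_ < kMaxMatches) {     // last batch was short: no more
      num_matches_ = 0;
      return NULL;
    }
    int start = registers_[2 * (current_ - 1) + 1];  // end of previous match
    num_matches_ = FillBatch(start);
    if (num_matches_ <= 0) return NULL;
    current_ = 0;
    return &registers_[0];
  }

 private:
  static const int kMaxMatches = 4;

  int FillBatch(int start) {
    int needle_len = static_cast<int>(strlen(needle_));
    int count = 0;
    while (count < kMaxMatches) {
      const char* hit = strstr(subject_ + start, needle_);
      if (hit == NULL) break;
      int index = static_cast<int>(hit - subject_);
      registers_[2 * count] = index;
      registers_[2 * count + 1] = index + needle_len;
      start = index + needle_len;
      count++;
    }
    return count;
  }

  const char* subject_;
  const char* needle_;
  int32_t registers_[2 * kMaxMatches];
  int current_;
  int num_matches_;
};

int main() {
  BatchIteratorSketch it("foo foo foo foo foo foo", "foo");
  const int32_t* match;
  while ((match = it.FetchNext()) != NULL) {
    printf("[%d, %d)\n", static_cast<int>(match[0]),
           static_cast<int>(match[1]));
  }
  return 0;
}
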
+
+
+int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+
// -------------------------------------------------------------------
// Implementation of the Irregexp regular expression engine.
//
@@ -795,24 +972,24 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
// the event that code generation is requested for an identical trace.
-void RegExpTree::AppendToText(RegExpText* text) {
+void RegExpTree::AppendToText(RegExpText* text, Zone* zone) {
UNREACHABLE();
}
-void RegExpAtom::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::Atom(this));
+void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::Atom(this), zone);
}
-void RegExpCharacterClass::AppendToText(RegExpText* text) {
- text->AddElement(TextElement::CharClass(this));
+void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::CharClass(this), zone);
}
-void RegExpText::AppendToText(RegExpText* text) {
+void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i));
+ text->AddElement(elements()->at(i), zone);
}
@@ -843,8 +1020,8 @@ int TextElement::length() {
DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
if (table_ == NULL) {
- table_ = new DispatchTable();
- DispatchTableConstructor cons(table_, ignore_case);
+ table_ = new(zone()) DispatchTable(zone());
+ DispatchTableConstructor cons(table_, ignore_case, zone());
cons.BuildTable(this);
}
return table_;
@@ -900,7 +1077,8 @@ class FrequencyCollator {
class RegExpCompiler {
public:
- RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii);
+ RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii,
+ Zone* zone);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -940,6 +1118,8 @@ class RegExpCompiler {
current_expansion_factor_ = value;
}
+ Zone* zone() const { return zone_; }
+
static const int kNoRegister = -1;
private:
@@ -953,6 +1133,7 @@ class RegExpCompiler {
bool reg_exp_too_big_;
int current_expansion_factor_;
FrequencyCollator frequency_collator_;
+ Zone* zone_;
};
@@ -974,7 +1155,8 @@ static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii)
+RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii,
+ Zone* zone)
: next_register_(2 * (capture_count + 1)),
work_list_(NULL),
recursion_depth_(0),
@@ -982,8 +1164,9 @@ RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii)
ascii_(ascii),
reg_exp_too_big_(false),
current_expansion_factor_(1),
- frequency_collator_() {
- accept_ = new EndNode(EndNode::ACCEPT);
+ frequency_collator_(),
+ zone_(zone) {
+ accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
}
@@ -1079,7 +1262,8 @@ bool Trace::GetStoredPosition(int reg, int* cp_offset) {
}
-int Trace::FindAffectedRegisters(OutSet* affected_registers) {
+int Trace::FindAffectedRegisters(OutSet* affected_registers,
+ Zone* zone) {
int max_register = RegExpCompiler::kNoRegister;
for (DeferredAction* action = actions_;
action != NULL;
@@ -1087,10 +1271,10 @@ int Trace::FindAffectedRegisters(OutSet* affected_registers) {
if (action->type() == ActionNode::CLEAR_CAPTURES) {
Interval range = static_cast<DeferredClearCaptures*>(action)->range();
for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i);
+ affected_registers->Set(i, zone);
if (range.to() > max_register) max_register = range.to();
} else {
- affected_registers->Set(action->reg());
+ affected_registers->Set(action->reg(), zone);
if (action->reg() > max_register) max_register = action->reg();
}
}
@@ -1119,7 +1303,8 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int max_register,
OutSet& affected_registers,
OutSet* registers_to_pop,
- OutSet* registers_to_clear) {
+ OutSet* registers_to_clear,
+ Zone* zone) {
// The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
@@ -1225,9 +1410,9 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
}
assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg);
+ registers_to_pop->Set(reg, zone);
} else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg);
+ registers_to_clear->Set(reg, zone);
}
// Perform the chronologically last action (or accumulated increment)
// for the register.
@@ -1273,14 +1458,16 @@ void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
assembler->PushCurrentPosition();
}
- int max_register = FindAffectedRegisters(&affected_registers);
+ int max_register = FindAffectedRegisters(&affected_registers,
+ compiler->zone());
OutSet registers_to_pop;
OutSet registers_to_clear;
PerformDeferredActions(assembler,
max_register,
affected_registers,
&registers_to_pop,
- &registers_to_clear);
+ &registers_to_clear,
+ compiler->zone());
if (cp_offset_ != 0) {
assembler->AdvanceCurrentPosition(cp_offset_);
}
@@ -1357,17 +1544,18 @@ void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
}
-void GuardedAlternative::AddGuard(Guard* guard) {
+void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
if (guards_ == NULL)
- guards_ = new ZoneList<Guard*>(1);
- guards_->Add(guard);
+ guards_ = new(zone) ZoneList<Guard*>(1, zone);
+ guards_->Add(guard, zone);
}
ActionNode* ActionNode::SetRegister(int reg,
int val,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(SET_REGISTER, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(SET_REGISTER, on_success);
result->data_.u_store_register.reg = reg;
result->data_.u_store_register.value = val;
return result;
@@ -1375,7 +1563,8 @@ ActionNode* ActionNode::SetRegister(int reg,
ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result = new ActionNode(INCREMENT_REGISTER, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
result->data_.u_increment_register.reg = reg;
return result;
}
@@ -1384,7 +1573,8 @@ ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
ActionNode* ActionNode::StorePosition(int reg,
bool is_capture,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(STORE_POSITION, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(STORE_POSITION, on_success);
result->data_.u_position_register.reg = reg;
result->data_.u_position_register.is_capture = is_capture;
return result;
@@ -1393,7 +1583,8 @@ ActionNode* ActionNode::StorePosition(int reg,
ActionNode* ActionNode::ClearCaptures(Interval range,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(CLEAR_CAPTURES, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
result->data_.u_clear_captures.range_from = range.from();
result->data_.u_clear_captures.range_to = range.to();
return result;
@@ -1403,7 +1594,8 @@ ActionNode* ActionNode::ClearCaptures(Interval range,
ActionNode* ActionNode::BeginSubmatch(int stack_reg,
int position_reg,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(BEGIN_SUBMATCH, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
result->data_.u_submatch.stack_pointer_register = stack_reg;
result->data_.u_submatch.current_position_register = position_reg;
return result;
@@ -1415,7 +1607,8 @@ ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
int clear_register_count,
int clear_register_from,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
result->data_.u_submatch.stack_pointer_register = stack_reg;
result->data_.u_submatch.current_position_register = position_reg;
result->data_.u_submatch.clear_register_count = clear_register_count;
@@ -1428,7 +1621,8 @@ ActionNode* ActionNode::EmptyMatchCheck(int start_register,
int repetition_register,
int repetition_limit,
RegExpNode* on_success) {
- ActionNode* result = new ActionNode(EMPTY_MATCH_CHECK, on_success);
+ ActionNode* result =
+ new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
result->data_.u_empty_match_check.start_register = start_register;
result->data_.u_empty_match_check.repetition_register = repetition_register;
result->data_.u_empty_match_check.repetition_limit = repetition_limit;
@@ -2008,8 +2202,9 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
Label* on_failure,
int cp_offset,
bool check_offset,
- bool preloaded) {
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ bool preloaded,
+ Zone* zone) {
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone);
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -2068,7 +2263,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
- if (cc->is_standard() &&
+ if (cc->is_standard(zone) &&
macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
on_failure)) {
return;
@@ -2081,7 +2276,8 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
// entry at zero which goes to the failure label, but if there
// was already one there we fall through for success on that entry.
// Subsequent entries have alternating meaning (success/failure).
- ZoneList<int>* range_boundaries = new ZoneList<int>(last_valid_range);
+ ZoneList<int>* range_boundaries =
+ new(zone) ZoneList<int>(last_valid_range, zone);
bool zeroth_entry_is_failure = !cc->is_negated();
@@ -2091,9 +2287,9 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
ASSERT_EQ(i, 0);
zeroth_entry_is_failure = !zeroth_entry_is_failure;
} else {
- range_boundaries->Add(range.from());
+ range_boundaries->Add(range.from(), zone);
}
- range_boundaries->Add(range.to() + 1);
+ range_boundaries->Add(range.to() + 1, zone);
}
int end_index = range_boundaries->length() - 1;
if (range_boundaries->at(end_index) > max_char) {
@@ -2174,12 +2370,15 @@ int ActionNode::EatsAtLeast(int still_to_find,
void ActionNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
} else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(offset, bm, not_at_start);
+ on_success()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2201,11 +2400,15 @@ int AssertionNode::EatsAtLeast(int still_to_find,
}
-void AssertionNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void AssertionNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
if (type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(offset, bm, not_at_start);
+ on_success()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2484,7 +2687,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
+ ZoneList<CharacterRange>* ranges = tree->ranges(zone());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
// useful way to incorporate a negative char class into this scheme
@@ -2669,7 +2872,7 @@ RegExpNode* TextNode::FilterASCII(int depth) {
} else {
ASSERT(elm.type == TextElement::CHAR_CLASS);
RegExpCharacterClass* cc = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -2716,6 +2919,15 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
if (info()->visited) return this;
VisitMarker marker(info());
int choice_count = alternatives_->length();
+
+ for (int i = 0; i < choice_count; i++) {
+ GuardedAlternative alternative = alternatives_->at(i);
+ if (alternative.guards() != NULL && alternative.guards()->length() != 0) {
+ set_replacement(this);
+ return this;
+ }
+ }
+
int surviving = 0;
RegExpNode* survivor = NULL;
for (int i = 0; i < choice_count; i++) {
@@ -2737,13 +2949,13 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
// Only some of the nodes survived the filtering. We need to rebuild the
// alternatives list.
ZoneList<GuardedAlternative>* new_alternatives =
- new ZoneList<GuardedAlternative>(surviving);
+ new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
alternatives_->at(i).node()->FilterASCII(depth - 1);
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
- new_alternatives->Add(alternatives_->at(i));
+ new_alternatives->Add(alternatives_->at(i), zone());
}
}
alternatives_ = new_alternatives;
@@ -2786,14 +2998,20 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
-void LoopChoiceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- if (body_can_be_zero_length_) {
+void LoopChoiceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ if (body_can_be_zero_length_ ||
+ recursion_depth > RegExpCompiler::kMaxRecursion ||
+ budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
- ChoiceNode::FillInBMInfo(offset, bm, not_at_start);
+ ChoiceNode::FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -2894,8 +3112,8 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
- new BoyerMooreLookahead(eats_at_least, compiler);
- FillInBMInfo(0, bm, not_at_start);
+ new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
+ FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
}
@@ -3121,7 +3339,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
backtrack,
cp_offset,
*checked_up_to < cp_offset,
- preloaded);
+ preloaded,
+ zone());
UpdateBoundsCheck(cp_offset, checked_up_to);
}
}
@@ -3242,11 +3461,11 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
RegExpCharacterClass* cc = elm.data.u_char_class;
       // None of the standard character classes differs in the
       // case-independent case, and it slows us down if we don't know that.
- if (cc->is_standard()) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges();
+ if (cc->is_standard(zone())) continue;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
int range_count = ranges->length();
for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
+ ranges->at(j).AddCaseEquivalents(ranges, is_ascii, zone());
}
}
}
@@ -3269,7 +3488,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
TextElement elm = elms_->at(0);
if (elm.type != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = node->ranges();
+ ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
}
@@ -3391,13 +3610,13 @@ class AlternativeGeneration: public Malloced {
// size then it is on the stack, otherwise the excess is on the heap.
class AlternativeGenerationList {
public:
- explicit AlternativeGenerationList(int count)
- : alt_gens_(count) {
+ AlternativeGenerationList(int count, Zone* zone)
+ : alt_gens_(count, zone) {
for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i);
+ alt_gens_.Add(a_few_alt_gens_ + i, zone);
}
for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration());
+ alt_gens_.Add(new AlternativeGeneration(), zone);
}
}
~AlternativeGenerationList() {
@@ -3475,7 +3694,7 @@ void BoyerMoorePositionInfo::SetAll() {
BoyerMooreLookahead::BoyerMooreLookahead(
- int length, RegExpCompiler* compiler)
+ int length, RegExpCompiler* compiler, Zone* zone)
: length_(length),
compiler_(compiler) {
if (compiler->ascii()) {
@@ -3483,9 +3702,9 @@ BoyerMooreLookahead::BoyerMooreLookahead(
} else {
max_char_ = String::kMaxUtf16CodeUnit;
}
- bitmaps_ = new ZoneList<BoyerMoorePositionInfo*>(length);
+ bitmaps_ = new(zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
for (int i = 0; i < length; i++) {
- bitmaps_->Add(new BoyerMoorePositionInfo());
+ bitmaps_->Add(new(zone) BoyerMoorePositionInfo(zone), zone);
}
}
@@ -3831,9 +4050,11 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
- new BoyerMooreLookahead(eats_at_least, compiler);
+ new(zone()) BoyerMooreLookahead(eats_at_least,
+ compiler,
+ zone());
GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, bm, not_at_start);
+ alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
}
} else {
@@ -3853,7 +4074,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
(current_trace->characters_preloaded() == preload_characters);
bool preload_has_checked_bounds = preload_is_current;
- AlternativeGenerationList alt_gens(choice_count);
+ AlternativeGenerationList alt_gens(choice_count, zone());
// For now we just call all choices one after the other. The idea ultimately
// is to use the Dispatch table to try only the relevant ones.
@@ -4333,6 +4554,7 @@ void DotPrinter::VisitChoice(ChoiceNode* that) {
void DotPrinter::VisitText(TextNode* that) {
+ Zone* zone = that->zone();
stream()->Add(" n%p [label=\"", that);
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
@@ -4347,8 +4569,8 @@ void DotPrinter::VisitText(TextNode* that) {
stream()->Add("[");
if (node->is_negated())
stream()->Add("^");
- for (int j = 0; j < node->ranges()->length(); j++) {
- CharacterRange range = node->ranges()->at(j);
+ for (int j = 0; j < node->ranges(zone)->length(); j++) {
+ CharacterRange range = node->ranges(zone)->at(j);
stream()->Add("%k-%k", range.from(), range.to());
}
stream()->Add("]");
@@ -4506,15 +4728,16 @@ void RegExpEngine::DotPrint(const char* label,
RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- ZoneList<TextElement>* elms = new ZoneList<TextElement>(1);
- elms->Add(TextElement::Atom(this));
- return new TextNode(elms, on_success);
+ ZoneList<TextElement>* elms =
+ new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
+ elms->Add(TextElement::Atom(this), compiler->zone());
+ return new(compiler->zone()) TextNode(elms, on_success);
}
RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new TextNode(elements(), on_success);
+ return new(compiler->zone()) TextNode(elements(), on_success);
}
@@ -4568,7 +4791,7 @@ static bool CompareRanges(ZoneList<CharacterRange>* ranges,
}
-bool RegExpCharacterClass::is_standard() {
+bool RegExpCharacterClass::is_standard(Zone* zone) {
// TODO(lrn): Remove need for this function, by not throwing away information
// along the way.
if (is_negated_) {
@@ -4577,31 +4800,31 @@ bool RegExpCharacterClass::is_standard() {
if (set_.is_standard()) {
return true;
}
- if (CompareRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+ if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
set_.set_standard_set_type('s');
return true;
}
- if (CompareInverseRanges(set_.ranges(), kSpaceRanges, kSpaceRangeCount)) {
+ if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
set_.set_standard_set_type('S');
return true;
}
- if (CompareInverseRanges(set_.ranges(),
+ if (CompareInverseRanges(set_.ranges(zone),
kLineTerminatorRanges,
kLineTerminatorRangeCount)) {
set_.set_standard_set_type('.');
return true;
}
- if (CompareRanges(set_.ranges(),
+ if (CompareRanges(set_.ranges(zone),
kLineTerminatorRanges,
kLineTerminatorRangeCount)) {
set_.set_standard_set_type('n');
return true;
}
- if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('w');
return true;
}
- if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('W');
return true;
}
@@ -4611,7 +4834,7 @@ bool RegExpCharacterClass::is_standard() {
RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new TextNode(this, on_success);
+ return new(compiler->zone()) TextNode(this, on_success);
}
@@ -4619,7 +4842,8 @@ RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
ZoneList<RegExpTree*>* alternatives = this->alternatives();
int length = alternatives->length();
- ChoiceNode* result = new ChoiceNode(length);
+ ChoiceNode* result =
+ new(compiler->zone()) ChoiceNode(length, compiler->zone());
for (int i = 0; i < length; i++) {
GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
on_success));
@@ -4712,6 +4936,8 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
int body_start_reg = RegExpCompiler::kNoRegister;
Interval capture_registers = body->CaptureRegisters();
bool needs_capture_clearing = !capture_registers.is_empty();
+ Zone* zone = compiler->zone();
+
if (body_can_be_empty) {
body_start_reg = compiler->AllocateRegister();
} else if (FLAG_regexp_optimization && !needs_capture_clearing) {
@@ -4742,7 +4968,7 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
// Unroll the optional matches up to max.
RegExpNode* answer = on_success;
for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new ChoiceNode(2);
+ ChoiceNode* alternation = new(zone) ChoiceNode(2, zone);
if (is_greedy) {
alternation->AddAlternative(
GuardedAlternative(body->ToNode(compiler, answer)));
@@ -4765,7 +4991,8 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
int reg_ctr = needs_counter
? compiler->AllocateRegister()
: RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new LoopChoiceNode(body->min_match() == 0);
+ LoopChoiceNode* center = new(zone) LoopChoiceNode(body->min_match() == 0,
+ zone);
if (not_at_start) center->set_not_at_start();
RegExpNode* loop_return = needs_counter
? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
@@ -4790,13 +5017,14 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
}
GuardedAlternative body_alt(body_node);
if (has_max) {
- Guard* body_guard = new Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard);
+ Guard* body_guard =
+ new(zone) Guard(reg_ctr, Guard::LT, max);
+ body_alt.AddGuard(body_guard, zone);
}
GuardedAlternative rest_alt(on_success);
if (has_min) {
- Guard* rest_guard = new Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard);
+ Guard* rest_guard = new(compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
+ rest_alt.AddGuard(rest_guard, zone);
}
if (is_greedy) {
center->AddLoopAlternative(body_alt);
@@ -4816,6 +5044,8 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
NodeInfo info;
+ Zone* zone = compiler->zone();
+
switch (type()) {
case START_OF_LINE:
return AssertionNode::AfterNewline(on_success);
@@ -4834,13 +5064,13 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
int stack_pointer_register = compiler->AllocateRegister();
int position_register = compiler->AllocateRegister();
// The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new ChoiceNode(2);
+ ChoiceNode* result = new(zone) ChoiceNode(2, zone);
// Create a newline atom.
ZoneList<CharacterRange>* newline_ranges =
- new ZoneList<CharacterRange>(3);
- CharacterRange::AddClassEscape('n', newline_ranges);
- RegExpCharacterClass* newline_atom = new RegExpCharacterClass('n');
- TextNode* newline_matcher = new TextNode(
+ new(zone) ZoneList<CharacterRange>(3, zone);
+ CharacterRange::AddClassEscape('n', newline_ranges, zone);
+ RegExpCharacterClass* newline_atom = new(zone) RegExpCharacterClass('n');
+ TextNode* newline_matcher = new(zone) TextNode(
newline_atom,
ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
position_register,
@@ -4868,9 +5098,10 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()),
- on_success);
+ return new(compiler->zone())
+ BackReferenceNode(RegExpCapture::StartRegister(index()),
+ RegExpCapture::EndRegister(index()),
+ on_success);
}
@@ -4915,16 +5146,20 @@ RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
// for a negative lookahead. The NegativeLookaheadChoiceNode is a special
// ChoiceNode that knows to ignore the first exit when calculating quick
// checks.
+ Zone* zone = compiler->zone();
+
GuardedAlternative body_alt(
body()->ToNode(
compiler,
- success = new NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start)));
+ success = new(zone) NegativeSubmatchSuccess(stack_pointer_register,
+ position_register,
+ register_count,
+ register_start,
+ zone)));
ChoiceNode* choice_node =
- new NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success));
+ new(zone) NegativeLookaheadChoiceNode(body_alt,
+ GuardedAlternative(on_success),
+ zone);
return ActionNode::BeginSubmatch(stack_pointer_register,
position_register,
choice_node);
@@ -4963,19 +5198,21 @@ RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
static void AddClass(const int* elmv,
int elmc,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
elmc--;
ASSERT(elmv[elmc] == 0x10000);
for (int i = 0; i < elmc; i += 2) {
ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1));
+ ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
}
}
static void AddClassNegated(const int *elmv,
int elmc,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
elmc--;
ASSERT(elmv[elmc] == 0x10000);
ASSERT(elmv[0] != 0x0000);
@@ -4984,51 +5221,54 @@ static void AddClassNegated(const int *elmv,
for (int i = 0; i < elmc; i += 2) {
ASSERT(last <= elmv[i] - 1);
ASSERT(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(last, elmv[i] - 1));
+ ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
+ ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit), zone);
}
void CharacterRange::AddClassEscape(uc16 type,
- ZoneList<CharacterRange>* ranges) {
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
switch (type) {
case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges);
+ AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
break;
case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges);
+ AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
break;
case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges);
+ AddClass(kWordRanges, kWordRangeCount, ranges, zone);
break;
case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges);
+ AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
break;
case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges);
+ AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
break;
case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges);
+ AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
break;
case '.':
AddClassNegated(kLineTerminatorRanges,
kLineTerminatorRangeCount,
- ranges);
+ ranges,
+ zone);
break;
// This is not a character range as defined by the spec but a
// convenient shorthand for a character class that matches any
// character.
case '*':
- ranges->Add(CharacterRange::Everything());
+ ranges->Add(CharacterRange::Everything(), zone);
break;
// This is the set of characters matched by the $ and ^ symbols
// in multiline mode.
case 'n':
AddClass(kLineTerminatorRanges,
kLineTerminatorRangeCount,
- ranges);
+ ranges,
+ zone);
break;
default:
UNREACHABLE();
@@ -5044,9 +5284,11 @@ Vector<const int> CharacterRange::GetWordBounds() {
class CharacterRangeSplitter {
public:
CharacterRangeSplitter(ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded)
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone)
: included_(included),
- excluded_(excluded) { }
+ excluded_(excluded),
+ zone_(zone) { }
void Call(uc16 from, DispatchTable::Entry entry);
static const int kInBase = 0;
@@ -5055,6 +5297,7 @@ class CharacterRangeSplitter {
private:
ZoneList<CharacterRange>** included_;
ZoneList<CharacterRange>** excluded_;
+ Zone* zone_;
};
@@ -5063,31 +5306,33 @@ void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
? included_
: excluded_;
- if (*target == NULL) *target = new ZoneList<CharacterRange>(2);
- (*target)->Add(CharacterRange(entry.from(), entry.to()));
+ if (*target == NULL) *target = new(zone_) ZoneList<CharacterRange>(2, zone_);
+ (*target)->Add(CharacterRange(entry.from(), entry.to()), zone_);
}
void CharacterRange::Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded) {
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone) {
ASSERT_EQ(NULL, *included);
ASSERT_EQ(NULL, *excluded);
- DispatchTable table;
+ DispatchTable table(zone);
for (int i = 0; i < base->length(); i++)
- table.AddRange(base->at(i), CharacterRangeSplitter::kInBase);
+ table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
for (int i = 0; i < overlay.length(); i += 2) {
table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
- CharacterRangeSplitter::kInOverlay);
+ CharacterRangeSplitter::kInOverlay, zone);
}
- CharacterRangeSplitter callback(included, excluded);
+ CharacterRangeSplitter callback(included, excluded, zone);
table.ForEach(&callback);
}
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
- bool is_ascii) {
+ bool is_ascii,
+ Zone* zone) {
Isolate* isolate = Isolate::Current();
uc16 bottom = from();
uc16 top = to();
@@ -5102,7 +5347,7 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]));
+ ranges->Add(CharacterRange::Singleton(chars[i]), zone);
}
}
} else {
@@ -5142,7 +5387,7 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
uc16 range_from = c - (block_end - pos);
uc16 range_to = c - (block_end - end);
if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange(range_from, range_to));
+ ranges->Add(CharacterRange(range_from, range_to), zone);
}
}
pos = end + 1;
@@ -5165,10 +5410,10 @@ bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
}
-ZoneList<CharacterRange>* CharacterSet::ranges() {
+ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
if (ranges_ == NULL) {
- ranges_ = new ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_);
+ ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::AddClassEscape(standard_set_type_, ranges_, zone);
}
return ranges_;
}
@@ -5297,7 +5542,8 @@ void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges) {
+ ZoneList<CharacterRange>* negated_ranges,
+ Zone* zone) {
ASSERT(CharacterRange::IsCanonical(ranges));
ASSERT_EQ(0, negated_ranges->length());
int range_count = ranges->length();
@@ -5309,12 +5555,13 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
}
while (i < range_count) {
CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
+ negated_ranges->Add(CharacterRange(from + 1, range.from() - 1), zone);
from = range.to();
i++;
}
if (from < String::kMaxUtf16CodeUnit) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
+ negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit),
+ zone);
}
}
@@ -5323,33 +5570,33 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
// Splay tree
-OutSet* OutSet::Extend(unsigned value) {
+OutSet* OutSet::Extend(unsigned value, Zone* zone) {
if (Get(value))
return this;
- if (successors() != NULL) {
- for (int i = 0; i < successors()->length(); i++) {
- OutSet* successor = successors()->at(i);
+ if (successors(zone) != NULL) {
+ for (int i = 0; i < successors(zone)->length(); i++) {
+ OutSet* successor = successors(zone)->at(i);
if (successor->Get(value))
return successor;
}
} else {
- successors_ = new ZoneList<OutSet*>(2);
+ successors_ = new(zone) ZoneList<OutSet*>(2, zone);
}
- OutSet* result = new OutSet(first_, remaining_);
- result->Set(value);
- successors()->Add(result);
+ OutSet* result = new(zone) OutSet(first_, remaining_);
+ result->Set(value, zone);
+ successors(zone)->Add(result, zone);
return result;
}
-void OutSet::Set(unsigned value) {
+void OutSet::Set(unsigned value, Zone *zone) {
if (value < kFirstLimit) {
first_ |= (1 << value);
} else {
if (remaining_ == NULL)
- remaining_ = new ZoneList<unsigned>(1);
+ remaining_ = new(zone) ZoneList<unsigned>(1, zone);
if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value);
+ remaining_->Add(value, zone);
}
}
@@ -5368,13 +5615,15 @@ bool OutSet::Get(unsigned value) {
const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-void DispatchTable::AddRange(CharacterRange full_range, int value) {
+void DispatchTable::AddRange(CharacterRange full_range, int value,
+ Zone* zone) {
CharacterRange current = full_range;
if (tree()->is_empty()) {
// If this is the first range we just insert into the table.
ZoneSplayTree<Config>::Locator loc;
ASSERT_RESULT(tree()->Insert(current.from(), &loc));
- loc.set_value(Entry(current.from(), current.to(), empty()->Extend(value)));
+ loc.set_value(Entry(current.from(), current.to(),
+ empty()->Extend(value, zone)));
return;
}
// First see if there is a range to the left of this one that
@@ -5417,7 +5666,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value) {
ASSERT_RESULT(tree()->Insert(current.from(), &ins));
ins.set_value(Entry(current.from(),
entry->from() - 1,
- empty()->Extend(value)));
+ empty()->Extend(value, zone)));
current.set_from(entry->from());
}
ASSERT_EQ(current.from(), entry->from());
@@ -5435,7 +5684,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value) {
// The overlapping range is now completely contained by the range
// we're adding so we can just update it and move the start point
// of the range we're adding just past it.
- entry->AddValue(value);
+ entry->AddValue(value, zone);
// Bail out if the last interval ended at 0xFFFF since otherwise
// adding 1 will wrap around to 0.
if (entry->to() == String::kMaxUtf16CodeUnit)
@@ -5448,7 +5697,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value) {
ASSERT_RESULT(tree()->Insert(current.from(), &ins));
ins.set_value(Entry(current.from(),
current.to(),
- empty()->Extend(value)));
+ empty()->Extend(value, zone)));
break;
}
}
@@ -5572,8 +5821,11 @@ void Analysis::VisitAssertion(AssertionNode* that) {
}
-void BackReferenceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void BackReferenceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Working out the set of characters that a backreference can match is too
// hard, so we just say that any character can match.
bm->SetRest(offset);
@@ -5585,9 +5837,13 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
RegExpMacroAssembler::kTableSize);
-void ChoiceNode::FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void ChoiceNode::FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
ZoneList<GuardedAlternative>* alts = alternatives();
+ budget = (budget - 1) / alts->length();
for (int i = 0; i < alts->length(); i++) {
GuardedAlternative& alt = alts->at(i);
if (alt.guards() != NULL && alt.guards()->length() != 0) {
@@ -5595,14 +5851,18 @@ void ChoiceNode::FillInBMInfo(
SaveBMInfo(bm, not_at_start, offset);
return;
}
- alt.node()->FillInBMInfo(offset, bm, not_at_start);
+ alt.node()->FillInBMInfo(
+ offset, recursion_depth + 1, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
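
The new recursion_depth and budget parameters bound the Boyer-Moore
lookahead analysis on pathological node graphs: each hop costs one unit, a
choice divides what is left evenly among its alternatives, and a node that
runs out of budget (or recurses past kMaxRecursion, as LoopChoiceNode does
above) falls back to the conservative bm->SetRest(offset). A toy model of
that discipline over a hypothetical node type (NodeSketch and
kMaxRecursionSketch are illustrative, not V8 types):

#include <cstdio>
#include <vector>

struct NodeSketch {
  std::vector<NodeSketch*> alternatives;  // empty => plain chain link
  NodeSketch* on_success = nullptr;
};

const int kMaxRecursionSketch = 10;

// Depth and a shrinking budget bound the traversal; a choice splits its
// budget evenly; an exhausted node gives up conservatively (the analogue
// of bm->SetRest(offset)).
void FillInfoSketch(NodeSketch* node, int depth, int budget, int offset) {
  if (node == nullptr) return;
  if (depth > kMaxRecursionSketch || budget <= 0) {
    printf("offset %d: budget or depth exhausted, set rest\n", offset);
    return;
  }
  if (!node->alternatives.empty()) {
    int share = (budget - 1) / static_cast<int>(node->alternatives.size());
    for (size_t i = 0; i < node->alternatives.size(); i++)
      FillInfoSketch(node->alternatives[i], depth + 1, share, offset);
    return;
  }
  FillInfoSketch(node->on_success, depth + 1, budget - 1, offset + 1);
}

int main() {
  NodeSketch leaf_a, leaf_b, choice;
  choice.alternatives.push_back(&leaf_a);
  choice.alternatives.push_back(&leaf_b);
  FillInfoSketch(&choice, 0, 1, 0);  // budget 1: both alternatives give up
  return 0;
}
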
-void TextNode::FillInBMInfo(
- int initial_offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void TextNode::FillInBMInfo(int initial_offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
if (initial_offset >= bm->length()) return;
int offset = initial_offset;
int max_char = bm->max_char();
@@ -5637,7 +5897,7 @@ void TextNode::FillInBMInfo(
} else {
ASSERT(text.type == TextElement::CHAR_CLASS);
RegExpCharacterClass* char_class = text.data.u_char_class;
- ZoneList<CharacterRange>* ranges = char_class->ranges();
+ ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
bm->SetAll(offset);
} else {
@@ -5656,6 +5916,8 @@ void TextNode::FillInBMInfo(
return;
}
on_success()->FillInBMInfo(offset,
+ recursion_depth + 1,
+ budget - 1,
bm,
true); // Not at start after a text node.
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5755,7 +6017,7 @@ void DispatchTableConstructor::VisitText(TextNode* that) {
}
case TextElement::CHAR_CLASS: {
RegExpCharacterClass* tree = elm.data.u_char_class;
- ZoneList<CharacterRange>* ranges = tree->ranges();
+ ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
if (tree->is_negated()) {
AddInverse(ranges);
} else {
@@ -5780,14 +6042,16 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpCompileData* data,
bool ignore_case,
+ bool is_global,
bool is_multiline,
Handle<String> pattern,
Handle<String> sample_subject,
- bool is_ascii) {
+ bool is_ascii,
+ Zone* zone) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
return IrregexpRegExpTooBig();
}
- RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii);
+ RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
// Sample some characters from the middle of the string.
static const int kSampleSize = 128;
@@ -5817,7 +6081,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpQuantifier::ToNode(0,
RegExpTree::kInfinity,
false,
- new RegExpCharacterClass('*'),
+ new(zone) RegExpCharacterClass('*'),
&compiler,
captured_body,
data->contains_anchor);
@@ -5825,10 +6089,10 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
// at the start of input.
- ChoiceNode* first_step_node = new ChoiceNode(2);
+ ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
first_step_node->AddAlternative(GuardedAlternative(captured_body));
first_step_node->AddAlternative(GuardedAlternative(
- new TextNode(new RegExpCharacterClass('*'), loop_node)));
+ new(zone) TextNode(new(zone) RegExpCharacterClass('*'), loop_node)));
node = first_step_node;
} else {
node = loop_node;
@@ -5841,7 +6105,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
}
- if (node == NULL) node = new EndNode(EndNode::BACKTRACK);
+ if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
Analysis analysis(ignore_case, is_ascii);
analysis.EnsureAnalyzed(node);
@@ -5859,19 +6123,23 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
: NativeRegExpMacroAssembler::UC16;
#if V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_ARM
- RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
- RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
+ RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#endif
#else // V8_INTERPRETED_REGEXP
// Interpreted regexp implementation.
EmbeddedVector<byte, 1024> codes;
- RegExpMacroAssemblerIrregexp macro_assembler(codes);
+ RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
#endif // V8_INTERPRETED_REGEXP
// Inserted here, instead of in Assembler, because it depends on information
@@ -5883,6 +6151,13 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
macro_assembler.SetCurrentPositionFromEnd(max_length);
}
+ if (is_global) {
+ macro_assembler.set_global_mode(
+ (data->tree->min_match() > 0)
+ ? RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK
+ : RegExpMacroAssembler::GLOBAL);
+ }
+
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
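
The new is_global block selects GLOBAL_NO_ZERO_LENGTH_CHECK when the pattern's minimum match length is positive and plain GLOBAL otherwise. The distinction matters because a global scan over a pattern that can match the empty string must forcibly advance past zero-length matches or it never terminates, while a pattern with min_match() > 0 can never produce an empty match and may skip the test. A self-contained sketch of that guard, using std::regex rather than V8 internals:

#include <iostream>
#include <regex>
#include <string>

int main() {
  std::string subject = "a1b22c";
  std::regex re("\\d*");  // min_match() == 0: can match the empty string
  std::string::const_iterator pos = subject.begin();
  std::smatch m;
  while (std::regex_search(pos, subject.cend(), m, re)) {
    std::cout << "match '" << m.str() << "' at "
              << (m[0].first - subject.cbegin()) << "\n";
    pos = m[0].second;
    // GLOBAL mode must bump the position past a zero-length match, or this
    // loop would never terminate; GLOBAL_NO_ZERO_LENGTH_CHECK omits the test
    // because such a pattern cannot match empty.
    if (m[0].first == m[0].second) {
      if (pos == subject.cend()) break;
      ++pos;
    }
  }
  return 0;
}
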
diff --git a/src/3rdparty/v8/src/jsregexp.h b/src/3rdparty/v8/src/jsregexp.h
index 20313ca..96825ce 100644
--- a/src/3rdparty/v8/src/jsregexp.h
+++ b/src/3rdparty/v8/src/jsregexp.h
@@ -71,7 +71,8 @@ class RegExpImpl {
// Returns false if compilation fails.
static Handle<Object> Compile(Handle<JSRegExp> re,
Handle<String> pattern,
- Handle<String> flags);
+ Handle<String> flags,
+ Zone* zone);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
@@ -92,6 +93,14 @@ class RegExpImpl {
JSRegExp::Flags flags,
Handle<String> match_pattern);
+
+ static int AtomExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size);
+
+
static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
@@ -104,31 +113,71 @@ class RegExpImpl {
// This ensures that the regexp is compiled for the subject, and that
// the subject is flat.
// Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
+ // as its "registers" argument. If the regexp cannot be compiled,
// an exception is set as pending, and this function returns negative.
static int IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject);
- // Execute a regular expression once on the subject, starting from
- // character "index".
- // If successful, returns RE_SUCCESS and set the capture positions
- // in the first registers.
+ // Execute a regular expression on the subject, starting from index.
+ // If matching succeeds, return the number of matches. This can be larger
+ // than one in the case of global regular expressions.
+ // The captures and subcaptures are stored into the registers vector.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> registers);
+ static int IrregexpExecRaw(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ int32_t* output,
+ int output_size);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
+ // captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Handle<JSArray> lastMatchInfo);
+ // Set last match info. If match is NULL, then setting captures is omitted.
+ static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info,
+ Handle<String> subject,
+ int capture_count,
+ int32_t* match);
+
+
+ class GlobalCache {
+ public:
+ GlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ bool is_global,
+ Isolate* isolate);
+
+ ~GlobalCache();
+
+ // Fetch the next entry in the cache for global regexp match results.
+ // This does not set the last match info. Upon failure, NULL is returned.
+ // The cause can be checked with Result(). The previous
+  // result is still available in memory when a failure happens.
+ int32_t* FetchNext();
+
+ int32_t* LastSuccessfulMatch();
+
+ inline bool HasException() { return num_matches_ < 0; }
+
+ private:
+ int num_matches_;
+ int max_matches_;
+ int current_match_index_;
+ int registers_per_match_;
+ // Pointer to the last set of captures.
+ int32_t* register_array_;
+ int register_array_size_;
+ Handle<JSRegExp> regexp_;
+ Handle<String> subject_;
+ };
+
+
// Array index in the lastMatchInfo array.
static const int kLastCaptureCount = 0;
static const int kLastSubject = 1;
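
The GlobalCache declared above runs the regexp in batches, storing start/end register pairs for several matches into one array and handing them out one match at a time through FetchNext(). The toy stand-in below keeps that shape but swaps Irregexp for a trivial single-character matcher; the names and the batch size of four are illustrative only:

#include <cstddef>
#include <cstdio>
#include <stdint.h>
#include <string>
#include <vector>

class ToyGlobalCache {
 public:
  ToyGlobalCache(const std::string& subject, char needle)
      : subject_(subject), needle_(needle),
        registers_per_match_(2), num_matches_(0), current_(0), pos_(0) {
    registers_.resize(registers_per_match_ * 4);  // room for 4 matches/batch
  }

  // Returns the registers of the next match, or NULL when none is left.
  int32_t* FetchNext() {
    ++current_;
    if (current_ >= num_matches_) {
      num_matches_ = FillBatch();  // the real code reruns the regexp here
      current_ = 0;
      if (num_matches_ <= 0) return NULL;
    }
    return &registers_[current_ * registers_per_match_];
  }

  bool HasException() { return num_matches_ < 0; }

 private:
  // Stand-in for one batched Irregexp run: record up to four matches.
  int FillBatch() {
    int found = 0;
    while (found < 4 && pos_ < subject_.size()) {
      if (subject_[pos_] == needle_) {
        registers_[found * 2] = static_cast<int32_t>(pos_);
        registers_[found * 2 + 1] = static_cast<int32_t>(pos_ + 1);
        ++found;
      }
      ++pos_;
    }
    return found;
  }

  std::string subject_;
  char needle_;
  int registers_per_match_;
  int num_matches_;
  int current_;
  std::size_t pos_;
  std::vector<int32_t> registers_;
};

int main() {
  ToyGlobalCache cache("banana", 'a');
  while (int32_t* match = cache.FetchNext()) {
    std::printf("match at [%d, %d)\n", match[0], match[1]);
  }
  if (cache.HasException()) std::printf("error\n");
  return 0;
}

In the real class a negative num_matches_ records a pending exception, which is what HasException() tests.
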
@@ -188,30 +237,10 @@ class RegExpImpl {
static const int kRegExpCompiledLimit = 1 * MB;
private:
- static String* last_ascii_string_;
- static String* two_byte_cached_string_;
-
static bool CompileIrregexp(
Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
static inline bool EnsureCompiledIrregexp(
Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
-
-
- // Set the subject cache. The previous string buffer is not deleted, so the
- // caller should ensure that it doesn't leak.
- static void SetSubjectCache(String* subject,
- char* utf8_subject,
- int utf8_length,
- int character_position,
- int utf8_position);
-
- // A one element cache of the last utf8_subject string and its length. The
- // subject JS String object is cached in the heap. We also cache a
- // translation between position and utf8 position.
- static char* utf8_subject_cache_;
- static int utf8_length_cache_;
- static int utf8_position_;
- static int character_position_;
};
@@ -233,7 +262,8 @@ class CharacterRange {
// For compatibility with the CHECK_OK macro
CharacterRange(void* null) { ASSERT_EQ(NULL, null); } //NOLINT
CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
+ static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
+ Zone* zone);
static Vector<const int> GetWordBounds();
static inline CharacterRange Singleton(uc16 value) {
return CharacterRange(value, value);
@@ -253,11 +283,13 @@ class CharacterRange {
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
+ void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii,
+ Zone* zone);
static void Split(ZoneList<CharacterRange>* base,
Vector<const int> overlay,
ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded);
+ ZoneList<CharacterRange>** excluded,
+ Zone* zone);
// Whether a range list is in canonical form: Ranges ordered by from value,
// and ranges non-overlapping and non-adjacent.
static bool IsCanonical(ZoneList<CharacterRange>* ranges);
@@ -268,7 +300,8 @@ class CharacterRange {
static void Canonicalize(ZoneList<CharacterRange>* ranges);
// Negate the contents of a character range in canonical form.
static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst);
+ ZoneList<CharacterRange>* dst,
+ Zone* zone);
static const int kStartMarker = (1 << 24);
static const int kPayloadMask = (1 << 24) - 1;
@@ -283,7 +316,7 @@ class CharacterRange {
class OutSet: public ZoneObject {
public:
OutSet() : first_(0), remaining_(NULL), successors_(NULL) { }
- OutSet* Extend(unsigned value);
+ OutSet* Extend(unsigned value, Zone* zone);
bool Get(unsigned value);
static const unsigned kFirstLimit = 32;
@@ -291,12 +324,12 @@ class OutSet: public ZoneObject {
// Destructively set a value in this set. In most cases you want
// to use Extend instead to ensure that only one instance exists
// that contains the same values.
- void Set(unsigned value);
+ void Set(unsigned value, Zone* zone);
// The successors are a list of sets that contain the same values
// as this set and the one more value that is not present in this
// set.
- ZoneList<OutSet*>* successors() { return successors_; }
+ ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
OutSet(uint32_t first, ZoneList<unsigned>* remaining)
: first_(first), remaining_(remaining), successors_(NULL) { }
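
Extend() on OutSet is non-destructive: it returns a successor set that holds the old values plus one more, memoized per added value, so identical extension chains share one instance, while Set() mutates in place. A minimal sketch of that contract (std::map and std::set replace the zone lists, and the allocation is deliberately leaked where a Zone would own it):

#include <cstdio>
#include <map>
#include <set>

class MiniOutSet {
 public:
  MiniOutSet* Extend(unsigned value) {
    if (values_.count(value)) return this;        // already present: share
    MiniOutSet*& successor = successors_[value];  // memoized per added value
    if (successor == NULL) {
      successor = new MiniOutSet(*this);          // leaked; a Zone would own it
      successor->values_.insert(value);
      successor->successors_.clear();
    }
    return successor;
  }
  bool Get(unsigned value) const { return values_.count(value) > 0; }

 private:
  std::set<unsigned> values_;
  std::map<unsigned, MiniOutSet*> successors_;
};

int main() {
  MiniOutSet empty;
  MiniOutSet* a = empty.Extend(1);
  MiniOutSet* b = empty.Extend(1);        // same successor instance as 'a'
  std::printf("shared: %d\n", a == b);    // prints 1
  std::printf("has 1:  %d\n", (int)a->Get(1));
  return 0;
}
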
@@ -311,6 +344,8 @@ class OutSet: public ZoneObject {
// Used for mapping character ranges to choices.
class DispatchTable : public ZoneObject {
public:
+ explicit DispatchTable(Zone* zone) : tree_(zone) { }
+
class Entry {
public:
Entry() : from_(0), to_(0), out_set_(NULL) { }
@@ -319,7 +354,9 @@ class DispatchTable : public ZoneObject {
uc16 from() { return from_; }
uc16 to() { return to_; }
void set_to(uc16 value) { to_ = value; }
- void AddValue(int value) { out_set_ = out_set_->Extend(value); }
+ void AddValue(int value, Zone* zone) {
+ out_set_ = out_set_->Extend(value, zone);
+ }
OutSet* out_set() { return out_set_; }
private:
uc16 from_;
@@ -343,12 +380,14 @@ class DispatchTable : public ZoneObject {
}
};
- void AddRange(CharacterRange range, int value);
+ void AddRange(CharacterRange range, int value, Zone* zone);
OutSet* Get(uc16 value);
void Dump();
template <typename Callback>
- void ForEach(Callback* callback) { return tree()->ForEach(callback); }
+ void ForEach(Callback* callback) {
+ return tree()->ForEach(callback);
+ }
private:
// There can't be a static empty set since it allocates its
@@ -528,7 +567,8 @@ extern int kUninitializedRegExpNodePlaceHolder;
class RegExpNode: public ZoneObject {
public:
- RegExpNode() : replacement_(NULL), trace_count_(0) {
+ explicit RegExpNode(Zone* zone)
+ : replacement_(NULL), trace_count_(0), zone_(zone) {
bm_info_[0] = bm_info_[1] = NULL;
}
virtual ~RegExpNode();
@@ -574,9 +614,14 @@ class RegExpNode: public ZoneObject {
// Collects information on the possible code units (mod 128) that can match if
// we look forward. This is used for a Boyer-Moore-like string searching
// implementation. TODO(erikcorry): This should share more code with
- // EatsAtLeast, GetQuickCheckDetails.
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
+ // the number of nodes we are willing to look at in order to create this data.
+ static const int kFillInBMBudget = 200;
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
UNREACHABLE();
}
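
FillInBMInfo now threads a recursion depth and a budget through the node graph; each node visited costs one unit of budget and the walk gives up once it is spent, which bounds the Boyer-Moore precomputation on pathological graphs. A toy traversal with the same shape (illustrative only, not the real node types):

#include <cstdio>

struct Node {
  Node* next;
  int SumLengths(int budget) {
    if (budget <= 0) return 0;     // budget spent: bounded lookahead only
    int here = 1;                  // pretend this node matches one character
    if (next != NULL) here += next->SumLengths(budget - 1);
    return here;
  }
};

int main() {
  Node c = { NULL }, b = { &c }, a = { &b };
  std::printf("full walk: %d\n", a.SumLengths(200));  // kFillInBMBudget-like
  std::printf("capped:    %d\n", a.SumLengths(2));    // gives up after 2 nodes
  return 0;
}
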
@@ -617,6 +662,8 @@ class RegExpNode: public ZoneObject {
return bm_info_[not_at_start ? 1 : 0];
}
+ Zone* zone() const { return zone_; }
+
protected:
enum LimitResult { DONE, CONTINUE };
RegExpNode* replacement_;
@@ -638,6 +685,8 @@ class RegExpNode: public ZoneObject {
// deferred operations in the current trace and generating a goto.
int trace_count_;
BoyerMooreLookahead* bm_info_[2];
+
+ Zone* zone_;
};
@@ -671,13 +720,17 @@ class Interval {
class SeqRegExpNode: public RegExpNode {
public:
explicit SeqRegExpNode(RegExpNode* on_success)
- : on_success_(on_success) { }
+ : RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
virtual RegExpNode* FilterASCII(int depth);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- on_success_->FillInBMInfo(offset, bm, not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ on_success_->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
@@ -730,8 +783,11 @@ class ActionNode: public SeqRegExpNode {
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
Type type() { return type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -782,8 +838,8 @@ class TextNode: public SeqRegExpNode {
TextNode(RegExpCharacterClass* that,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
- elms_(new ZoneList<TextElement>(1)) {
- elms_->Add(TextElement::CharClass(that));
+ elms_(new(zone()) ZoneList<TextElement>(1, zone())) {
+ elms_->Add(TextElement::CharClass(that), zone());
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -799,8 +855,11 @@ class TextNode: public SeqRegExpNode {
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
void CalculateOffsets();
virtual RegExpNode* FilterASCII(int depth);
@@ -836,19 +895,19 @@ class AssertionNode: public SeqRegExpNode {
AFTER_NEWLINE
};
static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new AssertionNode(AT_END, on_success);
+ return new(on_success->zone()) AssertionNode(AT_END, on_success);
}
static AssertionNode* AtStart(RegExpNode* on_success) {
- return new AssertionNode(AT_START, on_success);
+ return new(on_success->zone()) AssertionNode(AT_START, on_success);
}
static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_BOUNDARY, on_success);
+ return new(on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
}
static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new AssertionNode(AT_NON_BOUNDARY, on_success);
+ return new(on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
}
static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new AssertionNode(AFTER_NEWLINE, on_success);
+ return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -859,8 +918,11 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
AssertionNodeType type() { return type_; }
void set_type(AssertionNodeType type) { type_ = type; }
@@ -897,8 +959,11 @@ class BackReferenceNode: public SeqRegExpNode {
bool not_at_start) {
return;
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
private:
int start_reg_;
@@ -909,7 +974,8 @@ class BackReferenceNode: public SeqRegExpNode {
class EndNode: public RegExpNode {
public:
enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- explicit EndNode(Action action) : action_(action) { }
+ explicit EndNode(Action action, Zone* zone)
+ : RegExpNode(zone), action_(action) { }
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find,
@@ -922,8 +988,11 @@ class EndNode: public RegExpNode {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
@@ -938,8 +1007,9 @@ class NegativeSubmatchSuccess: public EndNode {
NegativeSubmatchSuccess(int stack_pointer_reg,
int position_reg,
int clear_capture_count,
- int clear_capture_start)
- : EndNode(NEGATIVE_SUBMATCH_SUCCESS),
+ int clear_capture_start,
+ Zone* zone)
+ : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
stack_pointer_register_(stack_pointer_reg),
current_position_register_(position_reg),
clear_capture_count_(clear_capture_count),
@@ -975,7 +1045,7 @@ class Guard: public ZoneObject {
class GuardedAlternative {
public:
explicit GuardedAlternative(RegExpNode* node) : node_(node), guards_(NULL) { }
- void AddGuard(Guard* guard);
+ void AddGuard(Guard* guard, Zone* zone);
RegExpNode* node() { return node_; }
void set_node(RegExpNode* node) { node_ = node; }
ZoneList<Guard*>* guards() { return guards_; }
@@ -991,13 +1061,17 @@ class AlternativeGeneration;
class ChoiceNode: public RegExpNode {
public:
- explicit ChoiceNode(int expected_size)
- : alternatives_(new ZoneList<GuardedAlternative>(expected_size)),
+ explicit ChoiceNode(int expected_size, Zone* zone)
+ : RegExpNode(zone),
+ alternatives_(new(zone)
+ ZoneList<GuardedAlternative>(expected_size, zone)),
table_(NULL),
not_at_start_(false),
being_calculated_(false) { }
virtual void Accept(NodeVisitor* visitor);
- void AddAlternative(GuardedAlternative node) { alternatives()->Add(node); }
+ void AddAlternative(GuardedAlternative node) {
+ alternatives()->Add(node, zone());
+ }
ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
DispatchTable* GetTable(bool ignore_case);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1012,8 +1086,11 @@ class ChoiceNode: public RegExpNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
bool being_calculated() { return being_calculated_; }
bool not_at_start() { return not_at_start_; }
@@ -1050,8 +1127,9 @@ class ChoiceNode: public RegExpNode {
class NegativeLookaheadChoiceNode: public ChoiceNode {
public:
explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this)
- : ChoiceNode(2) {
+ GuardedAlternative then_do_this,
+ Zone* zone)
+ : ChoiceNode(2, zone) {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
}
@@ -1062,9 +1140,13 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start) {
- alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ alternatives_->at(1).node()->FillInBMInfo(
+ offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@@ -1079,8 +1161,8 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
class LoopChoiceNode: public ChoiceNode {
public:
- explicit LoopChoiceNode(bool body_can_be_zero_length)
- : ChoiceNode(2),
+ explicit LoopChoiceNode(bool body_can_be_zero_length, Zone* zone)
+ : ChoiceNode(2, zone),
loop_node_(NULL),
continue_node_(NULL),
body_can_be_zero_length_(body_can_be_zero_length) { }
@@ -1094,8 +1176,11 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
- virtual void FillInBMInfo(
- int offset, BoyerMooreLookahead* bm, bool not_at_start);
+ virtual void FillInBMInfo(int offset,
+ int recursion_depth,
+ int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start);
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
@@ -1162,15 +1247,15 @@ ContainedInLattice AddRange(ContainedInLattice a,
class BoyerMoorePositionInfo : public ZoneObject {
public:
- BoyerMoorePositionInfo()
- : map_(new ZoneList<bool>(kMapSize)),
+ explicit BoyerMoorePositionInfo(Zone* zone)
+ : map_(new(zone) ZoneList<bool>(kMapSize, zone)),
map_count_(0),
w_(kNotYet),
s_(kNotYet),
d_(kNotYet),
surrogate_(kNotYet) {
for (int i = 0; i < kMapSize; i++) {
- map_->Add(false);
+ map_->Add(false, zone);
}
}
@@ -1199,7 +1284,7 @@ class BoyerMoorePositionInfo : public ZoneObject {
class BoyerMooreLookahead : public ZoneObject {
public:
- BoyerMooreLookahead(int length, RegExpCompiler* compiler);
+ BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
int length() { return length_; }
int max_char() { return max_char_; }
@@ -1401,12 +1486,13 @@ class Trace {
void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
private:
- int FindAffectedRegisters(OutSet* affected_registers);
+ int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear);
+ int max_register,
+ OutSet& affected_registers,
+ OutSet* registers_to_pop,
+ OutSet* registers_to_clear,
+ Zone* zone);
void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
int max_register,
OutSet& registers_to_pop,
@@ -1439,15 +1525,17 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// dispatch table of a choice node.
class DispatchTableConstructor: public NodeVisitor {
public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case)
+ DispatchTableConstructor(DispatchTable* table, bool ignore_case,
+ Zone* zone)
: table_(table),
choice_index_(-1),
- ignore_case_(ignore_case) { }
+ ignore_case_(ignore_case),
+ zone_(zone) { }
void BuildTable(ChoiceNode* node);
void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_);
+ table()->AddRange(range, choice_index_, zone_);
}
void AddInverse(ZoneList<CharacterRange>* ranges);
@@ -1464,6 +1552,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
DispatchTable* table_;
int choice_index_;
bool ignore_case_;
+ Zone* zone_;
};
@@ -1545,48 +1634,16 @@ class RegExpEngine: public AllStatic {
static CompilationResult Compile(RegExpCompileData* input,
bool ignore_case,
+ bool global,
bool multiline,
Handle<String> pattern,
Handle<String> sample_subject,
- bool is_ascii);
+ bool is_ascii, Zone* zone);
static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
};
-class OffsetsVector {
- public:
- inline OffsetsVector(int num_registers, Isolate* isolate)
- : offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- vector_ = NewArray<int>(offsets_vector_length_);
- } else {
- vector_ = isolate->jsregexp_static_offsets_vector();
- }
- }
- inline ~OffsetsVector() {
- if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(vector_);
- vector_ = NULL;
- }
- }
- inline int* vector() { return vector_; }
- inline int length() { return offsets_vector_length_; }
-
- static const int kStaticOffsetsVectorSize = 50;
-
- private:
- static Address static_offsets_vector_address(Isolate* isolate) {
- return reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector());
- }
-
- int* vector_;
- int offsets_vector_length_;
-
- friend class ExternalReference;
-};
-
-
} } // namespace v8::internal
#endif // V8_JSREGEXP_H_
diff --git a/src/3rdparty/v8/src/list-inl.h b/src/3rdparty/v8/src/list-inl.h
index 6cf3bad..60a033d 100644
--- a/src/3rdparty/v8/src/list-inl.h
+++ b/src/3rdparty/v8/src/list-inl.h
@@ -35,25 +35,25 @@ namespace internal {
template<typename T, class P>
-void List<T, P>::Add(const T& element) {
+void List<T, P>::Add(const T& element, P alloc) {
if (length_ < capacity_) {
data_[length_++] = element;
} else {
- List<T, P>::ResizeAdd(element);
+ List<T, P>::ResizeAdd(element, alloc);
}
}
template<typename T, class P>
-void List<T, P>::AddAll(const List<T, P>& other) {
- AddAll(other.ToVector());
+void List<T, P>::AddAll(const List<T, P>& other, P alloc) {
+ AddAll(other.ToVector(), alloc);
}
template<typename T, class P>
-void List<T, P>::AddAll(const Vector<T>& other) {
+void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
int result_length = length_ + other.length();
- if (capacity_ < result_length) Resize(result_length);
+ if (capacity_ < result_length) Resize(result_length, alloc);
for (int i = 0; i < other.length(); i++) {
data_[length_ + i] = other.at(i);
}
@@ -64,13 +64,13 @@ void List<T, P>::AddAll(const Vector<T>& other) {
// Use two layers of inlining so that the non-inlined function can
// use the same implementation as the inlined version.
template<typename T, class P>
-void List<T, P>::ResizeAdd(const T& element) {
- ResizeAddInternal(element);
+void List<T, P>::ResizeAdd(const T& element, P alloc) {
+ ResizeAddInternal(element, alloc);
}
template<typename T, class P>
-void List<T, P>::ResizeAddInternal(const T& element) {
+void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
ASSERT(length_ >= capacity_);
// Grow the list capacity by 100%, but make sure to let it grow
// even when the capacity is zero (possible initial case).
@@ -78,14 +78,14 @@ void List<T, P>::ResizeAddInternal(const T& element) {
// Since the element reference could be an element of the list, copy
// it out of the old backing storage before resizing.
T temp = element;
- Resize(new_capacity);
+ Resize(new_capacity, alloc);
data_[length_++] = temp;
}
template<typename T, class P>
-void List<T, P>::Resize(int new_capacity) {
- T* new_data = List<T, P>::NewData(new_capacity);
+void List<T, P>::Resize(int new_capacity, P alloc) {
+ T* new_data = NewData(new_capacity, alloc);
memcpy(new_data, data_, capacity_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
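
ResizeAddInternal copies the element into a local before resizing because the reference may point into the list's own backing store, which Resize is about to free. A standalone demonstration of the hazard and the fix (plain malloc/free in place of the allocation policy):

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct IntList {
  int* data;
  int length, capacity;

  void Add(const int& element) {
    if (length < capacity) {
      data[length++] = element;
      return;
    }
    int temp = element;  // copy out BEFORE the old storage is freed
    int new_capacity = 1 + 2 * capacity;  // the same 100% growth rule
    int* new_data = static_cast<int*>(std::malloc(new_capacity * sizeof(int)));
    if (data != NULL) std::memcpy(new_data, data, capacity * sizeof(int));
    std::free(data);
    data = new_data;
    capacity = new_capacity;
    data[length++] = temp;  // reading 'element' here could touch freed memory
  }
};

int main() {
  IntList list = { NULL, 0, 0 };
  list.Add(7);             // grows to capacity 1
  list.Add(list.data[0]);  // 'element' aliases the list's own storage
  std::printf("%d %d\n", list.data[0], list.data[1]);  // prints 7 7
  std::free(list.data);
  return 0;
}
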
@@ -94,17 +94,17 @@ void List<T, P>::Resize(int new_capacity) {
template<typename T, class P>
-Vector<T> List<T, P>::AddBlock(T value, int count) {
+Vector<T> List<T, P>::AddBlock(T value, int count, P alloc) {
int start = length_;
- for (int i = 0; i < count; i++) Add(value);
+ for (int i = 0; i < count; i++) Add(value, alloc);
return Vector<T>(&data_[start], count);
}
template<typename T, class P>
-void List<T, P>::InsertAt(int index, const T& elm) {
+void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
ASSERT(index >= 0 && index <= length_);
- Add(elm);
+ Add(elm, alloc);
for (int i = length_ - 1; i > index; --i) {
data_[i] = data_[i - 1];
}
@@ -137,9 +137,9 @@ bool List<T, P>::RemoveElement(const T& elm) {
template<typename T, class P>
-void List<T, P>::Allocate(int length) {
+void List<T, P>::Allocate(int length, P allocator) {
DeleteData(data_);
- Initialize(length);
+ Initialize(length, allocator);
length_ = length;
}
@@ -147,7 +147,11 @@ void List<T, P>::Allocate(int length) {
template<typename T, class P>
void List<T, P>::Clear() {
DeleteData(data_);
- Initialize(0);
+ // We don't call Initialize(0) since that requires passing a Zone,
+ // which we don't really need.
+ data_ = NULL;
+ capacity_ = 0;
+ length_ = 0;
}
@@ -207,9 +211,9 @@ void List<T, P>::Sort() {
template<typename T, class P>
-void List<T, P>::Initialize(int capacity) {
+void List<T, P>::Initialize(int capacity, P allocator) {
ASSERT(capacity >= 0);
- data_ = (capacity > 0) ? NewData(capacity) : NULL;
+ data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
capacity_ = capacity;
length_ = 0;
}
diff --git a/src/3rdparty/v8/src/list.h b/src/3rdparty/v8/src/list.h
index 7350c0d..7fd4f5c 100644
--- a/src/3rdparty/v8/src/list.h
+++ b/src/3rdparty/v8/src/list.h
@@ -45,12 +45,18 @@ namespace internal {
// the C free store or the zone; see zone.h.
// Forward defined as
-// template <typename T, class P = FreeStoreAllocationPolicy> class List;
-template <typename T, class P>
+// template <typename T,
+// class AllocationPolicy = FreeStoreAllocationPolicy> class List;
+template <typename T, class AllocationPolicy>
class List {
public:
- List() { Initialize(0); }
- INLINE(explicit List(int capacity)) { Initialize(capacity); }
+ explicit List(AllocationPolicy allocator = AllocationPolicy()) {
+ Initialize(0, allocator);
+ }
+ INLINE(explicit List(int capacity,
+ AllocationPolicy allocator = AllocationPolicy())) {
+ Initialize(capacity, allocator);
+ }
INLINE(~List()) { DeleteData(data_); }
// Deallocates memory used by the list and leaves the list in a consistent
@@ -60,10 +66,18 @@ class List {
Initialize(0);
}
- INLINE(void* operator new(size_t size)) {
- return P::New(static_cast<int>(size));
+ INLINE(void* operator new(size_t size,
+ AllocationPolicy allocator = AllocationPolicy())) {
+ return allocator.New(static_cast<int>(size));
+ }
+ INLINE(void operator delete(void* p)) {
+ AllocationPolicy::Delete(p);
+ }
+
+ // Please the MSVC compiler. We should never have to execute this.
+ INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
+ UNREACHABLE();
}
- INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
// Returns a reference to the element at index i. This reference is
// not safe to use after operations that can change the list's
@@ -87,21 +101,25 @@ class List {
// Adds a copy of the given 'element' to the end of the list,
// expanding the list if necessary.
- void Add(const T& element);
+ void Add(const T& element, AllocationPolicy allocator = AllocationPolicy());
// Add all the elements from the argument list to this list.
- void AddAll(const List<T, P>& other);
+ void AddAll(const List<T, AllocationPolicy>& other,
+ AllocationPolicy allocator = AllocationPolicy());
// Add all the elements from the vector to this list.
- void AddAll(const Vector<T>& other);
+ void AddAll(const Vector<T>& other,
+ AllocationPolicy allocator = AllocationPolicy());
// Inserts the element at the specific index.
- void InsertAt(int index, const T& element);
+ void InsertAt(int index, const T& element,
+ AllocationPolicy allocator = AllocationPolicy());
// Adds 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.
- Vector<T> AddBlock(T value, int count);
+ Vector<T> AddBlock(T value, int count,
+ AllocationPolicy allocator = AllocationPolicy());
// Removes the i'th element without deleting it even if T is a
// pointer type; moves all elements above i "down". Returns the
@@ -118,7 +136,8 @@ class List {
INLINE(T RemoveLast()) { return Remove(length_ - 1); }
// Deletes current list contents and allocates space for 'length' elements.
- INLINE(void Allocate(int length));
+ INLINE(void Allocate(int length,
+ AllocationPolicy allocator = AllocationPolicy()));
// Clears the list by setting the length to zero. Even if T is a
// pointer type, clearing the list doesn't delete the entries.
@@ -142,26 +161,31 @@ class List {
void Sort(int (*cmp)(const T* x, const T* y));
void Sort();
- INLINE(void Initialize(int capacity));
+ INLINE(void Initialize(int capacity,
+ AllocationPolicy allocator = AllocationPolicy()));
private:
T* data_;
int capacity_;
int length_;
- INLINE(T* NewData(int n)) { return static_cast<T*>(P::New(n * sizeof(T))); }
- INLINE(void DeleteData(T* data)) { P::Delete(data); }
+ INLINE(T* NewData(int n, AllocationPolicy allocator)) {
+ return static_cast<T*>(allocator.New(n * sizeof(T)));
+ }
+ INLINE(void DeleteData(T* data)) {
+ AllocationPolicy::Delete(data);
+ }
// Increase the capacity of a full list, and add an element.
// List must be full already.
- void ResizeAdd(const T& element);
+ void ResizeAdd(const T& element, AllocationPolicy allocator);
// Inlined implementation of ResizeAdd, shared by inlined and
// non-inlined versions of ResizeAdd.
- void ResizeAddInternal(const T& element);
+ void ResizeAddInternal(const T& element, AllocationPolicy allocator);
// Resize the list.
- void Resize(int new_capacity);
+ void Resize(int new_capacity, AllocationPolicy allocator);
DISALLOW_COPY_AND_ASSIGN(List);
};
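
The rewritten List replaces the static allocation-policy class P with an allocator value that is passed, defaulted, into every call that can allocate; this is what lets a stateful policy such as a Zone wrapper work without globals. A compilable miniature of the pattern (names are illustrative, not the V8 classes):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct FreeStorePolicy {
  void* New(std::size_t size) { return std::malloc(size); }
  static void Delete(void* p) { std::free(p); }
};

template <typename T, class AllocationPolicy = FreeStorePolicy>
class MiniList {
 public:
  explicit MiniList(int capacity,
                    AllocationPolicy allocator = AllocationPolicy())
      : length_(0), capacity_(capacity) {
    data_ = static_cast<T*>(allocator.New(capacity * sizeof(T)));
  }
  ~MiniList() { AllocationPolicy::Delete(data_); }

  // The allocator is re-supplied on every call that may allocate, so a
  // stateful policy (for example one wrapping a Zone*) needs no global state.
  void Add(const T& element, AllocationPolicy allocator = AllocationPolicy()) {
    if (length_ == capacity_) Grow(allocator);
    data_[length_++] = element;
  }

  T& at(int i) { return data_[i]; }

 private:
  void Grow(AllocationPolicy allocator) {
    int new_capacity = 1 + 2 * capacity_;  // same 100% growth rule as List
    T* new_data = static_cast<T*>(allocator.New(new_capacity * sizeof(T)));
    for (int i = 0; i < length_; i++) new_data[i] = data_[i];
    AllocationPolicy::Delete(data_);
    data_ = new_data;
    capacity_ = new_capacity;
  }

  T* data_;
  int length_;
  int capacity_;
};

int main() {
  MiniList<int> list(2);
  for (int i = 0; i < 5; i++) list.Add(i * i);
  std::printf("%d %d\n", list.at(0), list.at(4));  // prints 0 16
  return 0;
}

Passing the allocator per call rather than storing it also leaves sizeof(List) untouched, presumably one reason the policy is not kept as a member.
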
diff --git a/src/3rdparty/v8/src/lithium-allocator.cc b/src/3rdparty/v8/src/lithium-allocator.cc
index 9534f9e..91a9811 100644
--- a/src/3rdparty/v8/src/lithium-allocator.cc
+++ b/src/3rdparty/v8/src/lithium-allocator.cc
@@ -230,9 +230,9 @@ LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register());
+ op = LDoubleRegister::Create(assigned_register(), zone);
} else {
- op = LRegister::Create(assigned_register());
+ op = LRegister::Create(assigned_register(), zone);
}
} else if (IsSpilled()) {
ASSERT(!HasRegisterAssigned());
@@ -533,14 +533,14 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
LAllocator::LAllocator(int num_values, HGraph* graph)
: zone_(graph->zone()),
chunk_(NULL),
- live_in_sets_(graph->blocks()->length()),
- live_ranges_(num_values * 2),
+ live_in_sets_(graph->blocks()->length(), zone_),
+ live_ranges_(num_values * 2, zone_),
fixed_live_ranges_(NULL),
fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2),
- active_live_ranges_(8),
- inactive_live_ranges_(8),
- reusable_slots_(8),
+ unhandled_live_ranges_(num_values * 2, zone_),
+ active_live_ranges_(8, zone_),
+ inactive_live_ranges_(8, zone_),
+ reusable_slots_(8, zone_),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
mode_(GENERAL_REGISTERS),
@@ -553,8 +553,8 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
int block_count = graph_->blocks()->length();
- live_in_sets_.Initialize(block_count);
- live_in_sets_.AddBlock(NULL, block_count);
+ live_in_sets_.Initialize(block_count, zone());
+ live_in_sets_.AddBlock(NULL, block_count, zone());
}
@@ -630,7 +630,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
TraceAlloc("Fixed reg is tagged at %d\n", pos);
LInstruction* instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand);
+ instr->pointer_map()->RecordPointer(operand, zone());
}
}
return operand;
@@ -665,7 +665,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
LiveRange* LAllocator::LiveRangeFor(int index) {
if (index >= live_ranges_.length()) {
- live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
+ live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
}
LiveRange* result = live_ranges_[index];
if (result == NULL) {
@@ -746,7 +746,7 @@ void LAllocator::AddConstraintsGapMove(int index,
LOperand* from,
LOperand* to) {
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
if (from->IsUnallocated()) {
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
@@ -755,13 +755,13 @@ void LAllocator::AddConstraintsGapMove(int index,
if (cur_to->IsUnallocated()) {
if (LUnallocated::cast(cur_to)->virtual_register() ==
LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to);
+ move->AddMove(cur.source(), to, zone());
return;
}
}
}
}
- move->AddMove(from, to);
+ move->AddMove(from, to, zone());
}
@@ -800,7 +800,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
LiveRange* range = LiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained();
+ LUnallocated* output_copy = first_output->CopyUnconstrained(zone());
bool is_tagged = HasTaggedValue(first_output->virtual_register());
AllocateFixed(first_output, gap_index, is_tagged);
@@ -821,8 +821,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
// Thus it should be inserted to a lifetime position corresponding to
// the instruction end.
LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
- move->AddMove(first_output, range->GetSpillOperand());
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE, zone());
+ move->AddMove(first_output, range->GetSpillOperand(), zone());
}
}
@@ -831,7 +831,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
for (UseIterator it(second); !it.Done(); it.Advance()) {
LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -840,7 +840,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
cur_input->set_virtual_register(GetVirtualRegister());
if (!AllocationOk()) return;
@@ -864,7 +864,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
- LUnallocated* input_copy = cur_input->CopyUnconstrained();
+ LUnallocated* input_copy = cur_input->CopyUnconstrained(zone());
cur_input->set_virtual_register(second_output->virtual_register());
AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -872,7 +872,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
int index = gap_index + 1;
LInstruction* instr = InstructionAt(index);
if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy);
+ instr->pointer_map()->RecordPointer(input_copy, zone());
}
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
@@ -901,7 +901,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (IsGapAt(index)) {
// We have a gap at this position.
LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
@@ -1046,17 +1046,17 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
InstructionAt(cur_block->last_instruction_index());
if (branch->HasPointerMap()) {
if (phi->representation().IsTagged()) {
- branch->pointer_map()->RecordPointer(phi_operand);
+ branch->pointer_map()->RecordPointer(phi_operand, zone());
} else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand);
+ branch->pointer_map()->RecordUntagged(phi_operand, zone());
}
}
}
LiveRange* live_range = LiveRangeFor(phi->id());
LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START)->
- AddMove(phi_operand, live_range->GetSpillOperand());
+ label->GetOrCreateParallelMove(LGap::START, zone())->
+ AddMove(phi_operand, live_range->GetSpillOperand(), zone());
live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
}
}
@@ -1064,7 +1064,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
- chunk_ = chunk;
+ chunk_ = static_cast<LPlatformChunk*>(chunk);
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1151,14 +1151,15 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
LInstruction* branch = InstructionAt(pred->last_instruction_index());
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op);
+ branch->pointer_map()->RecordPointer(cur_op, zone());
} else if (!cur_op->IsDoubleStackSlot() &&
!cur_op->IsDoubleRegister()) {
branch->pointer_map()->RemovePointer(cur_op);
}
}
}
- gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
+ gap->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(pred_op, cur_op, zone());
}
}
}
@@ -1169,11 +1170,11 @@ LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
if (IsGapAt(index)) {
LGap* gap = GapAt(index);
return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END);
+ pos.IsInstructionStart() ? LGap::START : LGap::END, zone());
}
int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
+ (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, zone());
}
@@ -1205,7 +1206,7 @@ void LAllocator::ConnectRanges() {
LParallelMove* move = GetConnectingParallelMove(pos);
LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
- move->AddMove(prev_operand, cur_operand);
+ move->AddMove(prev_operand, cur_operand, zone());
}
}
}
@@ -1270,7 +1271,7 @@ void LAllocator::BuildLiveRanges() {
LOperand* hint = NULL;
LOperand* phi_operand = NULL;
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+ LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone());
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
@@ -1421,7 +1422,7 @@ void LAllocator::PopulatePointerMaps() {
safe_point >= range->spill_start_index()) {
TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand());
+ map->RecordPointer(range->GetSpillOperand(), zone());
}
if (!cur->IsSpilled()) {
@@ -1430,7 +1431,7 @@ void LAllocator::PopulatePointerMaps() {
cur->id(), cur->Start().Value(), safe_point);
LOperand* operand = cur->CreateAssignedOperand(zone_);
ASSERT(!operand->IsStackSlot());
- map->RecordPointer(operand);
+ map->RecordPointer(operand, zone());
}
}
}
@@ -1632,13 +1633,13 @@ RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
void LAllocator::AddToActive(LiveRange* range) {
TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges_.Add(range);
+ active_live_ranges_.Add(range, zone());
}
void LAllocator::AddToInactive(LiveRange* range) {
TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges_.Add(range);
+ inactive_live_ranges_.Add(range, zone());
}
@@ -1649,13 +1650,13 @@ void LAllocator::AddToUnhandledSorted(LiveRange* range) {
LiveRange* cur_range = unhandled_live_ranges_.at(i);
if (range->ShouldBeAllocatedBefore(cur_range)) {
TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- unhandled_live_ranges_.InsertAt(i + 1, range);
+ unhandled_live_ranges_.InsertAt(i + 1, range, zone());
ASSERT(UnhandledIsSorted());
return;
}
}
TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges_.InsertAt(0, range);
+ unhandled_live_ranges_.InsertAt(0, range, zone());
ASSERT(UnhandledIsSorted());
}
@@ -1664,7 +1665,7 @@ void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
if (range == NULL || range->IsEmpty()) return;
ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges_.Add(range);
+ unhandled_live_ranges_.Add(range, zone());
}
@@ -1705,7 +1706,7 @@ void LAllocator::FreeSpillSlot(LiveRange* range) {
int index = range->TopLevel()->GetSpillOperand()->index();
if (index >= 0) {
- reusable_slots_.Add(range);
+ reusable_slots_.Add(range, zone());
}
}
@@ -1733,7 +1734,7 @@ void LAllocator::ActiveToHandled(LiveRange* range) {
void LAllocator::ActiveToInactive(LiveRange* range) {
ASSERT(active_live_ranges_.Contains(range));
active_live_ranges_.RemoveElement(range);
- inactive_live_ranges_.Add(range);
+ inactive_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from active to inactive\n", range->id());
}
@@ -1749,7 +1750,7 @@ void LAllocator::InactiveToHandled(LiveRange* range) {
void LAllocator::InactiveToActive(LiveRange* range) {
ASSERT(inactive_live_ranges_.Contains(range));
inactive_live_ranges_.RemoveElement(range);
- active_live_ranges_.Add(range);
+ active_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from inactive to active\n", range->id());
}
diff --git a/src/3rdparty/v8/src/lithium-allocator.h b/src/3rdparty/v8/src/lithium-allocator.h
index f5ab055..5b05263 100644
--- a/src/3rdparty/v8/src/lithium-allocator.h
+++ b/src/3rdparty/v8/src/lithium-allocator.h
@@ -48,7 +48,7 @@ class BitVector;
class StringStream;
class LArgument;
-class LChunk;
+class LPlatformChunk;
class LOperand;
class LUnallocated;
class LConstantOperand;
@@ -455,8 +455,9 @@ class LAllocator BASE_EMBEDDED {
return &fixed_double_live_ranges_;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
int GetVirtualRegister() {
if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
@@ -597,7 +598,7 @@ class LAllocator BASE_EMBEDDED {
Zone* zone_;
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
// During liveness analysis keep a mapping from block id to live_in sets
// for blocks already analyzed.
diff --git a/src/3rdparty/v8/src/lithium.cc b/src/3rdparty/v8/src/lithium.cc
index c41cce8..eb2198d 100644
--- a/src/3rdparty/v8/src/lithium.cc
+++ b/src/3rdparty/v8/src/lithium.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,23 @@
#include "v8.h"
#include "lithium.h"
+#include "scopes.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+#else
+#error "Unknown architecture."
+#endif
namespace v8 {
namespace internal {
@@ -156,7 +173,7 @@ void LParallelMove::PrintDataTo(StringStream* stream) const {
void LEnvironment::PrintTo(StringStream* stream) {
- stream->Add("[id=%d|", ast_id());
+ stream->Add("[id=%d|", ast_id().ToInt());
stream->Add("[parameters=%d|", parameter_count());
stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
@@ -171,11 +188,11 @@ void LEnvironment::PrintTo(StringStream* stream) {
}
-void LPointerMap::RecordPointer(LOperand* op) {
+void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
+ pointer_operands_.Add(op, zone);
}
@@ -192,11 +209,11 @@ void LPointerMap::RemovePointer(LOperand* op) {
}
-void LPointerMap::RecordUntagged(LOperand* op) {
+void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op);
+ untagged_operands_.Add(op, zone);
}
@@ -225,9 +242,12 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
return 2;
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
return 3;
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
@@ -237,4 +257,183 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
}
+LLabel* LChunk::GetLabel(int block_id) const {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+ return LLabel::cast(instructions_[first_instruction]);
+}
+
+
+int LChunk::LookupDestination(int block_id) const {
+ LLabel* cur = GetLabel(block_id);
+ while (cur->replacement() != NULL) {
+ cur = cur->replacement();
+ }
+ return cur->block_id();
+}
+
+Label* LChunk::GetAssemblyLabel(int block_id) const {
+ LLabel* label = GetLabel(block_id);
+ ASSERT(!label->HasReplacement());
+ return label->label();
+}
+
+void LChunk::MarkEmptyBlocks() {
+ HPhase phase("L_Mark empty blocks", this);
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ int first = block->first_instruction_index();
+ int last = block->last_instruction_index();
+ LInstruction* first_instr = instructions()->at(first);
+ LInstruction* last_instr = instructions()->at(last);
+
+ LLabel* label = LLabel::cast(first_instr);
+ if (last_instr->IsGoto()) {
+ LGoto* goto_instr = LGoto::cast(last_instr);
+ if (label->IsRedundant() &&
+ !label->is_loop_header()) {
+ bool can_eliminate = true;
+ for (int i = first + 1; i < last && can_eliminate; ++i) {
+ LInstruction* cur = instructions()->at(i);
+ if (cur->IsGap()) {
+ LGap* gap = LGap::cast(cur);
+ if (!gap->IsRedundant()) {
+ can_eliminate = false;
+ }
+ } else {
+ can_eliminate = false;
+ }
+ }
+
+ if (can_eliminate) {
+ label->set_replacement(GetLabel(goto_instr->block_id()));
+ }
+ }
+ }
+ }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+ int index = -1;
+ if (instr->IsControl()) {
+ instructions_.Add(gap, zone());
+ index = instructions_.length();
+ instructions_.Add(instr, zone());
+ } else {
+ index = instructions_.length();
+ instructions_.Add(instr, zone());
+ instructions_.Add(gap, zone());
+ }
+ if (instr->HasPointerMap()) {
+ pointer_maps_.Add(instr->pointer_map(), zone());
+ instr->pointer_map()->set_lithium_position(index);
+ }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+ return LConstantOperand::Create(constant->id(), zone());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+ // The receiver is at index 0, the first parameter at index 1, so we
+ // shift all parameter indexes down by the number of parameters, and
+ // make sure they end up negative so they are distinguishable from
+ // spill slots.
+ int result = index - info()->scope()->num_parameters() - 1;
+ ASSERT(result < 0);
+ return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+ ASSERT(-1 <= index); // -1 is the receiver.
+ return (1 + info()->scope()->num_parameters() - index) *
+ kPointerSize;
+}
+
+
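
GetParameterStackSlot above shifts incoming indexes down past the receiver so parameters land at negative slot indexes, which keeps them distinguishable from non-negative spill slots. A worked example of the arithmetic, assuming a function with two declared parameters:

#include <cstdio>

int GetParameterStackSlot(int index, int num_parameters) {
  // Receiver is index 0, first declared parameter is index 1.
  return index - num_parameters - 1;
}

int main() {
  const int num_parameters = 2;
  for (int index = 0; index <= num_parameters; index++) {
    std::printf("index %d -> slot %d\n",
                index, GetParameterStackSlot(index, num_parameters));
  }
  // prints: 0 -> -3, 1 -> -2, 2 -> -1, all negative as the ASSERT requires
  return 0;
}
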
+LGap* LChunk::GetGapAt(int index) const {
+ return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+ return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+ while (!IsGapAt(index)) index--;
+ return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+ GetGapAt(index)->GetOrCreateParallelMove(
+ LGap::START, zone())->AddMove(from, to, zone());
+}
+
+
+HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
+ return HConstant::cast(graph_->LookupValue(operand->index()));
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+ LConstantOperand* operand) const {
+ return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunk::NewChunk(HGraph* graph) {
+ NoHandleAllocation no_handles;
+ AssertNoAllocation no_gc;
+
+ int values = graph->GetMaximumValueID();
+ CompilationInfo* info = graph->info();
+ if (values > LUnallocated::kMaxVirtualRegisters) {
+ info->set_bailout_reason("not enough virtual registers for values");
+ return NULL;
+ }
+ LAllocator allocator(values, graph);
+ LChunkBuilder builder(info, graph, &allocator);
+ LChunk* chunk = builder.Build();
+ if (chunk == NULL) return NULL;
+
+ if (!allocator.Allocate(chunk)) {
+ info->set_bailout_reason("not enough virtual registers (regalloc)");
+ return NULL;
+ }
+
+ return chunk;
+}
+
+
+Handle<Code> LChunk::Codegen() {
+ MacroAssembler assembler(info()->isolate(), NULL, 0);
+ LCodeGen generator(this, &assembler, info());
+
+ MarkEmptyBlocks();
+
+ if (generator.GenerateCode()) {
+ if (FLAG_trace_codegen) {
+ PrintF("Crankshaft Compiler - ");
+ }
+ CodeGenerator::MakeCodePrologue(info());
+ Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ Handle<Code> code =
+ CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+ generator.FinishCode(code);
+ CodeGenerator::PrintCode(code, info());
+ return code;
+ }
+ return Handle<Code>::null();
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/lithium.h b/src/3rdparty/v8/src/lithium.h
index 2ccbf56..b4eb2bb 100644
--- a/src/3rdparty/v8/src/lithium.h
+++ b/src/3rdparty/v8/src/lithium.h
@@ -133,13 +133,15 @@ class LUnallocated: public LOperand {
// index in the upper bits.
static const int kPolicyWidth = 3;
static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 18;
+ static const int kVirtualRegisterWidth = 15;
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
static const int kFixedIndexShift =
kVirtualRegisterShift + kVirtualRegisterWidth;
+ static const int kFixedIndexWidth = 32 - kFixedIndexShift;
+ STATIC_ASSERT(kFixedIndexWidth > 5);
class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
@@ -154,8 +156,8 @@ class LUnallocated: public LOperand {
};
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = 63;
- static const int kMinFixedIndex = -64;
+ static const int kMaxFixedIndex = (1 << kFixedIndexWidth) - 1;
+ static const int kMinFixedIndex = -(1 << kFixedIndexWidth);
bool HasAnyPolicy() const {
return policy() == ANY;
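
The width arithmetic in the hunk above can be checked by hand. kKindFieldWidth = 3 is an assumption taken from LOperand (it is not visible in this hunk); everything else follows from the constants in the diff:

#include <cstdio>

int main() {
  const int kKindFieldWidth = 3;  // assumed, from LOperand
  const int kPolicyWidth = 3;
  const int kLifetimeWidth = 1;
  const int kVirtualRegisterWidth = 15;  // reduced from 18 in this patch

  const int kPolicyShift = kKindFieldWidth;                          // 3
  const int kLifetimeShift = kPolicyShift + kPolicyWidth;            // 6
  const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth; // 7
  const int kFixedIndexShift = kVirtualRegisterShift + kVirtualRegisterWidth;
  const int kFixedIndexWidth = 32 - kFixedIndexShift;                // 10 bits

  std::printf("max virtual registers: %d\n", 1 << kVirtualRegisterWidth);
  std::printf("fixed index range: [%d, %d]\n",
              -(1 << kFixedIndexWidth), (1 << kFixedIndexWidth) - 1);
  // prints 32768 and [-1024, 1023]; STATIC_ASSERT(10 > 5) holds
  return 0;
}
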
@@ -187,8 +189,8 @@ class LUnallocated: public LOperand {
value_ = VirtualRegisterField::update(value_, id);
}
- LUnallocated* CopyUnconstrained() {
- LUnallocated* result = new LUnallocated(ANY);
+ LUnallocated* CopyUnconstrained(Zone* zone) {
+ LUnallocated* result = new(zone) LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
@@ -260,10 +262,10 @@ class LMoveOperands BASE_EMBEDDED {
class LConstantOperand: public LOperand {
public:
- static LConstantOperand* Create(int index) {
+ static LConstantOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LConstantOperand(index);
+ return new(zone) LConstantOperand(index);
}
static LConstantOperand* cast(LOperand* op) {
@@ -296,10 +298,10 @@ class LArgument: public LOperand {
class LStackSlot: public LOperand {
public:
- static LStackSlot* Create(int index) {
+ static LStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LStackSlot(index);
+ return new(zone) LStackSlot(index);
}
static LStackSlot* cast(LOperand* op) {
@@ -321,10 +323,10 @@ class LStackSlot: public LOperand {
class LDoubleStackSlot: public LOperand {
public:
- static LDoubleStackSlot* Create(int index) {
+ static LDoubleStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleStackSlot(index);
+ return new(zone) LDoubleStackSlot(index);
}
static LDoubleStackSlot* cast(LOperand* op) {
@@ -346,10 +348,10 @@ class LDoubleStackSlot: public LOperand {
class LRegister: public LOperand {
public:
- static LRegister* Create(int index) {
+ static LRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LRegister(index);
+ return new(zone) LRegister(index);
}
static LRegister* cast(LOperand* op) {
@@ -371,10 +373,10 @@ class LRegister: public LOperand {
class LDoubleRegister: public LOperand {
public:
- static LDoubleRegister* Create(int index) {
+ static LDoubleRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleRegister(index);
+ return new(zone) LDoubleRegister(index);
}
static LDoubleRegister* cast(LOperand* op) {
@@ -396,10 +398,10 @@ class LDoubleRegister: public LOperand {
class LParallelMove : public ZoneObject {
public:
- LParallelMove() : move_operands_(4) { }
+ explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
+ void AddMove(LOperand* from, LOperand* to, Zone* zone) {
+ move_operands_.Add(LMoveOperands(from, to), zone);
}
bool IsRedundant() const;
@@ -417,9 +419,9 @@ class LParallelMove : public ZoneObject {
class LPointerMap: public ZoneObject {
public:
- explicit LPointerMap(int position)
- : pointer_operands_(8),
- untagged_operands_(0),
+ explicit LPointerMap(int position, Zone* zone)
+ : pointer_operands_(8, zone),
+ untagged_operands_(0, zone),
position_(position),
lithium_position_(-1) { }
@@ -438,9 +440,9 @@ class LPointerMap: public ZoneObject {
lithium_position_ = pos;
}
- void RecordPointer(LOperand* op);
+ void RecordPointer(LOperand* op, Zone* zone);
void RemovePointer(LOperand* op);
- void RecordUntagged(LOperand* op);
+ void RecordUntagged(LOperand* op, Zone* zone);
void PrintTo(StringStream* stream);
private:
@@ -455,11 +457,13 @@ class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
- int ast_id,
+ BailoutId ast_id,
int parameter_count,
int argument_count,
int value_count,
- LEnvironment* outer)
+ LEnvironment* outer,
+ HEnterInlined* entry,
+ Zone* zone)
: closure_(closure),
frame_type_(frame_type),
arguments_stack_height_(argument_count),
@@ -468,18 +472,21 @@ class LEnvironment: public ZoneObject {
ast_id_(ast_id),
parameter_count_(parameter_count),
pc_offset_(-1),
- values_(value_count),
- is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
+ values_(value_count, zone),
+ is_tagged_(value_count, zone),
+ is_uint32_(value_count, zone),
spilled_registers_(NULL),
spilled_double_registers_(NULL),
- outer_(outer) { }
+ outer_(outer),
+ entry_(entry),
+ zone_(zone) { }
Handle<JSFunction> closure() const { return closure_; }
FrameType frame_type() const { return frame_type_; }
int arguments_stack_height() const { return arguments_stack_height_; }
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
+ BailoutId ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
LOperand** spilled_registers() const { return spilled_registers_; }
@@ -488,18 +495,30 @@ class LEnvironment: public ZoneObject {
}
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
+ HEnterInlined* entry() { return entry_; }
- void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
+ void AddValue(LOperand* operand,
+ Representation representation,
+ bool is_uint32) {
+ values_.Add(operand, zone());
if (representation.IsTagged()) {
+ ASSERT(!is_uint32);
is_tagged_.Add(values_.length() - 1);
}
+
+ if (is_uint32) {
+ is_uint32_.Add(values_.length() - 1);
+ }
}
bool HasTaggedValueAt(int index) const {
return is_tagged_.Contains(index);
}
+ bool HasUint32ValueAt(int index) const {
+ return is_uint32_.Contains(index);
+ }
+
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
@@ -520,17 +539,20 @@ class LEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
+ Zone* zone() const { return zone_; }
+
private:
Handle<JSFunction> closure_;
FrameType frame_type_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
- int ast_id_;
+ BailoutId ast_id_;
int parameter_count_;
int pc_offset_;
ZoneList<LOperand*> values_;
BitVector is_tagged_;
+ BitVector is_uint32_;
// Allocation index indexed arrays of spill slot operands for registers
// that are also in spill slots at an OSR entry. NULL for environments
@@ -539,6 +561,9 @@ class LEnvironment: public ZoneObject {
LOperand** spilled_double_registers_;
LEnvironment* outer_;
+ HEnterInlined* entry_;
+
+ Zone* zone_;
};
@@ -616,6 +641,69 @@ class DeepIterator BASE_EMBEDDED {
};
+class LPlatformChunk;
+class LGap;
+class LLabel;
+
+// Superclass providing data and behavior common to all the
+// arch-specific LPlatformChunk classes.
+class LChunk: public ZoneObject {
+ public:
+ static LChunk* NewChunk(HGraph* graph);
+
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ LConstantOperand* DefineConstantOperand(HConstant* constant);
+ HConstant* LookupConstant(LConstantOperand* operand) const;
+ Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+ int ParameterAt(int index);
+ int GetParameterStackSlot(int index) const;
+ int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
+ HGraph* graph() const { return graph_; }
+ const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ void AddGapMove(int index, LOperand* from, LOperand* to);
+ LGap* GetGapAt(int index) const;
+ bool IsGapAt(int index) const;
+ int NearestGapPos(int index) const;
+ void MarkEmptyBlocks();
+ const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+ LLabel* GetLabel(int block_id) const;
+ int LookupDestination(int block_id) const;
+ Label* GetAssemblyLabel(int block_id) const;
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
+ }
+
+ void AddInlinedClosure(Handle<JSFunction> closure) {
+ inlined_closures_.Add(closure, zone());
+ }
+
+ Zone* zone() const { return info_->zone(); }
+
+ Handle<Code> Codegen();
+
+ protected:
+ LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) { }
+
+ int spill_slot_count_;
+
+ private:
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ ZoneList<LInstruction*> instructions_;
+ ZoneList<LPointerMap*> pointer_maps_;
+ ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
int ElementsKindToShiftSize(ElementsKind elements_kind);
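
The dominant change in lithium.h is threading a Zone* through every allocation and replacing plain new with new(zone). A minimal sketch of the arena idea behind that placement-new syntax (toy code; V8's real Zone grows in segments and never returns NULL):

    #include <cstddef>
    #include <cstdlib>

    // Toy bump-pointer arena; everything is freed at once when the arena dies.
    class Zone {
     public:
      explicit Zone(size_t capacity)
          : buffer_(static_cast<char*>(malloc(capacity))),
            top_(buffer_), end_(buffer_ + capacity) {}
      ~Zone() { free(buffer_); }
      void* New(size_t size) {
        size = (size + 7) & ~size_t(7);   // 8-byte alignment
        void* result = top_;
        top_ += size;                     // assume capacity suffices here;
        return result;                    // the real Zone grows a new segment
      }
     private:
      char* buffer_;
      char* top_;
      char* end_;
    };

    // Placement operator new: allocation comes from the zone; there is no
    // matching delete, the whole zone is released in one shot.
    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }

    struct LOperandLike { int value_; explicit LOperandLike(int v) : value_(v) {} };

    int main() {
      Zone zone(1 << 16);
      LOperandLike* op = new(zone) LOperandLike(42);  // freed with the zone
      return op->value_ == 42 ? 0 : 1;
    }
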
diff --git a/src/3rdparty/v8/src/liveedit-debugger.js b/src/3rdparty/v8/src/liveedit-debugger.js
index 4463c93..cfcdb81 100644
--- a/src/3rdparty/v8/src/liveedit-debugger.js
+++ b/src/3rdparty/v8/src/liveedit-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,6 +47,8 @@ Debug.LiveEdit = new function() {
// Forward declaration for minifier.
var FunctionStatus;
+ var NEEDS_STEP_IN_PROPERTY_NAME = "stack_update_needs_step_in";
+
// Applies the change to the script.
// The change is in form of list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end)
@@ -161,7 +163,7 @@ Debug.LiveEdit = new function() {
// Our current implementation requires client to manually issue "step in"
// command for correct stack state.
- preview_description.stack_update_needs_step_in =
+ preview_description[NEEDS_STEP_IN_PROPERTY_NAME] =
preview_description.stack_modified;
// Start with breakpoints. Convert their line/column positions and
@@ -1078,6 +1080,18 @@ Debug.LiveEdit = new function() {
return ProcessOldNode(old_code_tree);
}
+ // Restarts the call frame and returns a value similar to what LiveEdit returns.
+ function RestartFrame(frame_mirror) {
+ var result = frame_mirror.restart();
+ if (IS_STRING(result)) {
+ throw new Failure("Failed to restart frame: " + result);
+ }
+ result = {};
+ result[NEEDS_STEP_IN_PROPERTY_NAME] = true;
+ return result;
+ }
+ // Function is public.
+ this.RestartFrame = RestartFrame;
// Functions are public for tests.
this.TestApi = {
diff --git a/src/3rdparty/v8/src/liveedit.cc b/src/3rdparty/v8/src/liveedit.cc
index 22b8250..dc7d4b1 100644
--- a/src/3rdparty/v8/src/liveedit.cc
+++ b/src/3rdparty/v8/src/liveedit.cc
@@ -601,7 +601,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
PostponeInterruptsScope postpone(isolate);
// Build AST.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
info.MarkAsGlobal();
// Parse and don't allow skipping lazy functions.
if (ParserApi::Parse(&info, kNoParsingFlags)) {
@@ -635,6 +635,21 @@ static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
}
+static Handle<SharedFunctionInfo> UnwrapSharedFunctionInfoFromJSValue(
+ Handle<JSValue> jsValue) {
+ Object* shared = jsValue->value();
+ CHECK(shared->IsSharedFunctionInfo());
+ return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(shared));
+}
+
+
+static int GetArrayLength(Handle<JSArray> array) {
+ Object* length = array->length();
+ CHECK(length->IsSmi());
+ return Smi::cast(length)->value();
+}
+
+
// Simple helper class that creates more or less typed structures over
// a JSArray object. This is an ad hoc method of passing structures from C++
// to JavaScript.
@@ -670,6 +685,7 @@ class JSArrayBasedStruct {
}
int GetSmiValueField(int field_position) {
Object* res = GetField(field_position);
+ CHECK(res->IsSmi());
return Smi::cast(res)->value();
}
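
The added CHECK(res->IsSmi()) turns a blind cast into a validated one. A small sketch of the same check-before-cast discipline on a toy tagged value (names and tagging scheme here are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    #define CHECK(cond)                                                       \
      do {                                                                    \
        if (!(cond)) { fprintf(stderr, "CHECK failed: %s\n", #cond); abort(); } \
      } while (false)

    // Toy tagged value: small integers carry a 0 tag bit, pointers a 1.
    struct Tagged {
      intptr_t bits;
      bool IsSmi() const { return (bits & 1) == 0; }
      int SmiValue() const { return static_cast<int>(bits >> 1); }
    };

    // Mirrors GetSmiValueField: validate the dynamic type before decoding
    // instead of assuming the array element is well-formed.
    int GetSmiValue(Tagged field) {
      CHECK(field.IsSmi());
      return field.SmiValue();
    }

    int main() {
      Tagged three{3 << 1};  // smi-encoded 3
      return GetSmiValue(three) == 3 ? 0 : 1;
    }
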
@@ -714,14 +730,17 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
return this->GetSmiValueField(kParentIndexOffset_);
}
Handle<Code> GetFunctionCode() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeOffset_))));
+ Object* element = this->GetField(kCodeOffset_);
+ CHECK(element->IsJSValue());
+ Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+ CHECK(raw_result->IsCode());
return Handle<Code>::cast(raw_result);
}
Handle<Object> GetCodeScopeInfo() {
- Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
- JSValue::cast(this->GetField(kCodeScopeInfoOffset_))));
- return raw_result;
+ Object* element = this->GetField(kCodeScopeInfoOffset_);
+ CHECK(element->IsJSValue());
+ return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
}
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
@@ -771,9 +790,9 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
}
Handle<SharedFunctionInfo> GetInfo() {
Object* element = this->GetField(kSharedInfoOffset_);
+ CHECK(element->IsJSValue());
Handle<JSValue> value_wrapper(JSValue::cast(element));
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- return Handle<SharedFunctionInfo>::cast(raw_result);
+ return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
}
private:
@@ -825,7 +844,8 @@ class FunctionInfoListener {
// Saves full information about a function: its code, its scope info
// and a SharedFunctionInfo object.
- void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
+ void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope,
+ Zone* zone) {
if (!shared->IsSharedFunctionInfo()) {
return;
}
@@ -836,14 +856,14 @@ class FunctionInfoListener {
Handle<Object>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
- Handle<Object> scope_info_list(SerializeFunctionScope(scope));
+ Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone));
info.SetOuterScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
private:
- Object* SerializeFunctionScope(Scope* scope) {
+ Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
HandleScope handle_scope;
Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
@@ -857,8 +877,8 @@ class FunctionInfoListener {
return HEAP->undefined_value();
}
do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+ ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
@@ -893,7 +913,6 @@ class FunctionInfoListener {
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
Isolate* isolate = Isolate::Current();
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
FunctionInfoListener listener;
Handle<Object> original_source = Handle<Object>(script->source());
@@ -909,7 +928,7 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
HandleScope scope;
- int len = Smi::cast(array->length())->value();
+ int len = GetArrayLength(array);
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
@@ -922,33 +941,35 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
}
-// Visitor that collects all references to a particular code object,
-// including "CODE_TARGET" references in other code objects.
-// It works in context of ZoneScope.
-class ReferenceCollectorVisitor : public ObjectVisitor {
+// Visitor that finds all references to a particular code object,
+// including "CODE_TARGET" references in other code objects and replaces
+// them on the fly.
+class ReplacingVisitor : public ObjectVisitor {
public:
- explicit ReferenceCollectorVisitor(Code* original)
- : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
+ explicit ReplacingVisitor(Code* original, Code* substitution)
+ : original_(original), substitution_(substitution) {
}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
- rvalues_.Add(p);
+ *p = substitution_;
}
}
}
virtual void VisitCodeEntry(Address entry) {
if (Code::GetObjectFromEntryAddress(entry) == original_) {
- code_entries_.Add(entry);
+ Address substitution_entry = substitution_->instruction_start();
+ Memory::Address_at(entry) = substitution_entry;
}
}
virtual void VisitCodeTarget(RelocInfo* rinfo) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
- reloc_infos_.Add(*rinfo);
+ Address substitution_entry = substitution_->instruction_start();
+ rinfo->set_target_address(substitution_entry);
}
}
@@ -956,56 +977,40 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
VisitCodeTarget(rinfo);
}
- // Post-visiting method that iterates over all collected references and
- // modifies them.
- void Replace(Code* substitution) {
- for (int i = 0; i < rvalues_.length(); i++) {
- *(rvalues_[i]) = substitution;
- }
- Address substitution_entry = substitution->instruction_start();
- for (int i = 0; i < reloc_infos_.length(); i++) {
- reloc_infos_[i].set_target_address(substitution_entry);
- }
- for (int i = 0; i < code_entries_.length(); i++) {
- Address entry = code_entries_[i];
- Memory::Address_at(entry) = substitution_entry;
- }
- }
-
private:
Code* original_;
- ZoneList<Object**> rvalues_;
- ZoneList<RelocInfo> reloc_infos_;
- ZoneList<Address> code_entries_;
+ Code* substitution_;
};
// Finds all references to original and replaces them with substitution.
-static void ReplaceCodeObject(Code* original, Code* substitution) {
- ASSERT(!HEAP->InNewSpace(substitution));
+static void ReplaceCodeObject(Handle<Code> original,
+ Handle<Code> substitution) {
+ // Perform a full GC in order to ensure that we are not in the middle of an
+ // incremental marking phase when we are replacing the code object.
+ // Since we are not in an incremental marking phase we can write pointers
+ // to code objects (that are never in new space) without worrying about
+ // write barriers.
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "liveedit.cc ReplaceCodeObject");
+
+ ASSERT(!HEAP->InNewSpace(*substitution));
- HeapIterator iterator;
AssertNoAllocation no_allocations_please;
- // A zone scope for ReferenceCollectorVisitor.
- ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
-
- ReferenceCollectorVisitor visitor(original);
+ ReplacingVisitor visitor(*original, *substitution);
// Iterate over all roots. Stack frames may have pointers into the original
// code, so temporarily replace those pointers with offset numbers
// in prologue/epilogue.
- {
- HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
- }
+ HEAP->IterateRoots(&visitor, VISIT_ALL);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
-
- visitor.Replace(substitution);
}
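
The rewrite above folds the old collect-then-patch two-pass scheme into a single pass: since the full GC guarantees no incremental marking is in progress, each matching slot can be rewritten the moment the visitor sees it. The core of that replace-on-the-fly pattern, reduced to a sketch:

    // Replace-on-the-fly visitor: instead of collecting references into
    // zone-allocated lists and patching them in a second pass, rewrite each
    // matching slot as soon as it is visited.
    struct Visitor {
      void* original;
      void* substitution;
      void VisitPointers(void** start, void** end) {
        for (void** p = start; p < end; p++) {
          if (*p == original) *p = substitution;
        }
      }
    };

    int main() {
      int a = 1, b = 2;
      void* slots[3] = {&a, &b, &a};
      Visitor v{&a, &b};
      v.VisitPointers(slots, slots + 3);
      return (slots[0] == &b && slots[2] == &b) ? 0 : 1;
    }
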
@@ -1089,8 +1094,8 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
- ReplaceCodeObject(shared_info->code(), *code);
- Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
+ ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
+ Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
@@ -1140,7 +1145,8 @@ MaybeObject* LiveEdit::FunctionSourceUpdated(
void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<Object> script_handle) {
Handle<SharedFunctionInfo> shared_info =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
+ UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
+ CHECK(script_handle->IsScript() || script_handle->IsUndefined());
shared_info->set_script(*script_handle);
Isolate::Current()->compilation_cache()->Remove(shared_info);
@@ -1159,19 +1165,22 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
static int TranslatePosition(int original_position,
Handle<JSArray> position_change_array) {
int position_diff = 0;
- int array_len = Smi::cast(position_change_array->length())->value();
+ int array_len = GetArrayLength(position_change_array);
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
Object* element = position_change_array->GetElementNoExceptionThrown(i);
+ CHECK(element->IsSmi());
int chunk_start = Smi::cast(element)->value();
if (original_position < chunk_start) {
break;
}
element = position_change_array->GetElementNoExceptionThrown(i + 1);
+ CHECK(element->IsSmi());
int chunk_end = Smi::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
element = position_change_array->GetElementNoExceptionThrown(i + 2);
+ CHECK(element->IsSmi());
int chunk_changed_end = Smi::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
}
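
TranslatePosition accumulates the shift from each (pos1_start, pos1_end, pos2_end) triplet that ends at or before the position. The same arithmetic in a standalone sketch, assuming chunks are sorted and non-overlapping:

    #include <cassert>

    // Each edit chunk is (old_start, old_end, new_end); positions strictly
    // before a chunk keep their old coordinates, positions after it shift by
    // (new_end - old_end). Mirrors the loop in TranslatePosition.
    int Translate(int position, const int (*chunks)[3], int count) {
      int diff = 0;
      for (int i = 0; i < count; i++) {
        int old_start = chunks[i][0];
        int old_end = chunks[i][1];
        int new_end = chunks[i][2];
        if (position < old_start) break;
        assert(position >= old_end);  // position must not fall inside a chunk
        diff = new_end - old_end;
      }
      return position + diff;
    }

    int main() {
      // One chunk: old text [10,20) replaced by text ending at 25 (+5 shift).
      const int chunks[][3] = {{10, 20, 25}};
      assert(Translate(5, chunks, 1) == 5);    // before the chunk: unchanged
      assert(Translate(30, chunks, 1) == 35);  // after the chunk: shifted by 5
      return 0;
    }
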
@@ -1278,7 +1287,9 @@ static Handle<Code> PatchPositionsInCode(
continue;
}
}
- buffer_writer.Write(it.rinfo());
+ if (RelocInfo::IsRealRelocMode(rinfo->rmode())) {
+ buffer_writer.Write(it.rinfo());
+ }
}
}
@@ -1300,7 +1311,6 @@ static Handle<Code> PatchPositionsInCode(
MaybeObject* LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
-
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
return Isolate::Current()->ThrowIllegalOperation();
}
@@ -1332,7 +1342,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
// on stack (it is safe to substitute the code object on stack, because
// we only change the structure of rinfo and leave instructions
// untouched).
- ReplaceCodeObject(info->code(), *patched_code);
+ ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
}
}
@@ -1390,11 +1400,11 @@ void LiveEdit::ReplaceRefToNestedFunction(
Handle<JSValue> subst_function_wrapper) {
Handle<SharedFunctionInfo> parent_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(parent_function_wrapper));
+ UnwrapSharedFunctionInfoFromJSValue(parent_function_wrapper);
Handle<SharedFunctionInfo> orig_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(orig_function_wrapper));
+ UnwrapSharedFunctionInfoFromJSValue(orig_function_wrapper);
Handle<SharedFunctionInfo> subst_shared =
- Handle<SharedFunctionInfo>::cast(UnwrapJSValue(subst_function_wrapper));
+ UnwrapSharedFunctionInfoFromJSValue(subst_function_wrapper);
for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
@@ -1417,12 +1427,13 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Handle<JSFunction> function(
JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
- int len = Smi::cast(shared_info_array->length())->value();
+ int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- JSValue* wrapper =
- JSValue::cast(shared_info_array->GetElementNoExceptionThrown(i));
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(wrapper->value()));
+ Object* element = shared_info_array->GetElementNoExceptionThrown(i);
+ CHECK(element->IsJSValue());
+ Handle<JSValue> jsvalue(JSValue::cast(element));
+ Handle<SharedFunctionInfo> shared =
+ UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status)));
@@ -1491,7 +1502,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
isolate->builtins()->builtin(
Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
- *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+ pre_top_frame = frames[top_frame_index - 2];
+ top_frame = frames[top_frame_index - 1];
+ *mode = Debug::CURRENTLY_SET_MODE;
frame_has_padding = false;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
@@ -1506,6 +1519,15 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Here the stub is CEntry, it's not debug-only and can't be padded.
// If anyone would complain, a proxy padded stub could be added.
frame_has_padding = false;
+ } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
+ // This must be an adaptor frame left over from an earlier frame drop
+ // and still on the stack. A frame dropper frame must be above it.
+ ASSERT(frames[top_frame_index - 2]->LookupCode() ==
+ isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
+ pre_top_frame = frames[top_frame_index - 3];
+ top_frame = frames[top_frame_index - 2];
+ *mode = Debug::CURRENTLY_SET_MODE;
+ frame_has_padding = false;
} else {
return "Unknown structure of stack above changing function";
}
@@ -1589,16 +1611,36 @@ static bool IsDropableFrame(StackFrame* frame) {
return !frame->is_exit();
}
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
+
+// Describes a set of call frames that execute any of listed functions.
+// Finding no such frames is not an error.
+class MultipleFunctionTarget {
+ public:
+ MultipleFunctionTarget(Handle<JSArray> shared_info_array,
+ Handle<JSArray> result)
+ : m_shared_info_array(shared_info_array),
+ m_result(result) {}
+ bool MatchActivation(StackFrame* frame,
+ LiveEdit::FunctionPatchabilityStatus status) {
+ return CheckActivation(m_shared_info_array, m_result, frame, status);
+ }
+ const char* GetNotFoundMessage() {
+ return NULL;
+ }
+ private:
+ Handle<JSArray> m_shared_info_array;
+ Handle<JSArray> m_result;
+};
+
+// Drops all call frames matched by the target and all frames above them.
+template<typename TARGET>
+static const char* DropActivationsInActiveThreadImpl(
+ TARGET& target, bool do_drop, Zone* zone) {
Isolate* isolate = Isolate::Current();
Debug* debug = isolate->debug();
- ZoneScope scope(isolate, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap();
+ ZoneScope scope(zone, DELETE_ON_EXIT);
+ Vector<StackFrame*> frames = CreateStackMap(zone);
- int array_len = Smi::cast(shared_info_array->length())->value();
int top_frame_index = -1;
int frame_index = 0;
@@ -1608,8 +1650,8 @@ static const char* DropActivationsInActiveThread(
top_frame_index = frame_index;
break;
}
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// We are still above break_frame. It is not a target frame,
// it is a problem.
return "Debugger mark-up on stack is not found";
@@ -1618,7 +1660,7 @@ static const char* DropActivationsInActiveThread(
if (top_frame_index == -1) {
// We haven't found break frame, but no function is blocking us anyway.
- return NULL;
+ return target.GetNotFoundMessage();
}
bool target_frame_found = false;
@@ -1631,8 +1673,8 @@ static const char* DropActivationsInActiveThread(
c_code_found = true;
break;
}
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
target_frame_found = true;
bottom_js_frame_index = frame_index;
}
@@ -1644,8 +1686,8 @@ static const char* DropActivationsInActiveThread(
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (frame->is_java_script()) {
- if (CheckActivation(shared_info_array, result, frame,
- LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+ if (target.MatchActivation(
+ frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// Cannot drop frame under C frames.
return NULL;
}
@@ -1660,7 +1702,7 @@ static const char* DropActivationsInActiveThread(
if (!target_frame_found) {
// Nothing to drop.
- return NULL;
+ return target.GetNotFoundMessage();
}
Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
@@ -1683,6 +1725,23 @@ static const char* DropActivationsInActiveThread(
}
debug->FramesHaveBeenDropped(new_id, drop_mode,
restarter_frame_function_pointer);
+ return NULL;
+}
+
+// Fills the result array with statuses of functions. Modifies the stack,
+// removing all listed functions if possible and if do_drop is true.
+static const char* DropActivationsInActiveThread(
+ Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
+ Zone* zone) {
+ MultipleFunctionTarget target(shared_info_array, result);
+
+ const char* message =
+ DropActivationsInActiveThreadImpl(target, do_drop, zone);
+ if (message) {
+ return message;
+ }
+
+ int array_len = GetArrayLength(shared_info_array);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
@@ -1723,8 +1782,8 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop) {
- int len = Smi::cast(shared_info_array->length())->value();
+ Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
+ int len = GetArrayLength(shared_info_array);
Handle<JSArray> result = FACTORY->NewJSArray(len);
@@ -1748,7 +1807,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Try to drop activations from the current stack.
const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop);
+ DropActivationsInActiveThread(shared_info_array, result, do_drop, zone);
if (error_message != NULL) {
// Add error message as an array extra element.
Vector<const char> vector_message(error_message, StrLength(error_message));
@@ -1759,6 +1818,50 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
}
+// Describes a single call frame as a target. Not finding this frame
+// means an error.
+class SingleFrameTarget {
+ public:
+ explicit SingleFrameTarget(JavaScriptFrame* frame)
+ : m_frame(frame),
+ m_saved_status(LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {}
+
+ bool MatchActivation(StackFrame* frame,
+ LiveEdit::FunctionPatchabilityStatus status) {
+ if (frame->fp() == m_frame->fp()) {
+ m_saved_status = status;
+ return true;
+ }
+ return false;
+ }
+ const char* GetNotFoundMessage() {
+ return "Failed to found requested frame";
+ }
+ LiveEdit::FunctionPatchabilityStatus saved_status() {
+ return m_saved_status;
+ }
+ private:
+ JavaScriptFrame* m_frame;
+ LiveEdit::FunctionPatchabilityStatus m_saved_status;
+};
+
+
+// Finds and drops the required frame and all frames above it.
+// Returns error message or NULL.
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+ SingleFrameTarget target(frame);
+
+ const char* result = DropActivationsInActiveThreadImpl(target, true, zone);
+ if (result != NULL) {
+ return result;
+ }
+ if (target.saved_status() == LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE) {
+ return "Function is blocked under native code";
+ }
+ return NULL;
+}
+
+
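
Both MultipleFunctionTarget and SingleFrameTarget satisfy the implicit TARGET interface of DropActivationsInActiveThreadImpl: a MatchActivation predicate plus a GetNotFoundMessage fallback. A stripped-down sketch of that duck-typed template pattern (the frame representation is just an int for illustration):

    // Duck-typed target: any type with MatchActivation/GetNotFoundMessage works;
    // there is no virtual dispatch and no shared base class.
    template <typename TARGET>
    const char* DropMatchingImpl(TARGET& target, const int* frames, int count) {
      bool found = false;
      for (int i = 0; i < count; i++) {
        if (target.MatchActivation(frames[i])) found = true;
      }
      return found ? nullptr : target.GetNotFoundMessage();
    }

    // One instantiation matches a set of functions, this one a single frame.
    struct SingleTarget {
      int wanted;
      bool MatchActivation(int frame) { return frame == wanted; }
      const char* GetNotFoundMessage() { return "Failed to find requested frame"; }
    };

    int main() {
      int frames[] = {1, 2, 3};
      SingleTarget t{2};
      return DropMatchingImpl(t, frames, 3) == nullptr ? 0 : 1;
    }
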
LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
FunctionLiteral* fun)
: isolate_(isolate) {
@@ -1776,9 +1879,11 @@ LiveEditFunctionTracker::~LiveEditFunctionTracker() {
void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
+ Zone* zone) {
if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope());
+ isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope(),
+ zone);
}
}
@@ -1807,7 +1912,8 @@ LiveEditFunctionTracker::~LiveEditFunctionTracker() {
void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
+ Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
+ Zone* zone) {
}
diff --git a/src/3rdparty/v8/src/liveedit.h b/src/3rdparty/v8/src/liveedit.h
index 4ee4466..5b12854 100644
--- a/src/3rdparty/v8/src/liveedit.h
+++ b/src/3rdparty/v8/src/liveedit.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -69,7 +69,7 @@ class LiveEditFunctionTracker {
explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
~LiveEditFunctionTracker();
void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
- FunctionLiteral* lit);
+ FunctionLiteral* lit, Zone* zone);
void RecordRootFunctionInfo(Handle<Code> code);
static bool IsActive(Isolate* isolate);
@@ -121,7 +121,11 @@ class LiveEdit : AllStatic {
// restarts the lowest found frames and drops all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop);
+ Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
+
+ // Restarts the call frame and completely drops all frames above it.
+ // Returns an error message or NULL.
+ static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
diff --git a/src/3rdparty/v8/src/liveobjectlist.cc b/src/3rdparty/v8/src/liveobjectlist.cc
index 1aabc59..6b89cf6 100644
--- a/src/3rdparty/v8/src/liveobjectlist.cc
+++ b/src/3rdparty/v8/src/liveobjectlist.cc
@@ -74,7 +74,7 @@ typedef int (*RawComparer)(const void*, const void*);
v(SeqAsciiString, "unexpected: SeqAsciiString") \
v(SeqString, "unexpected: SeqString") \
v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
- v(GlobalContext, "unexpected: GlobalContext") \
+ v(NativeContext, "unexpected: NativeContext") \
v(MapCache, "unexpected: MapCache") \
v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \
v(CompilationCacheTable, "unexpected: CompilationCacheTable") \
@@ -1951,7 +1951,7 @@ MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- isolate->context()->global_context()->arguments_boilerplate();
+ isolate->context()->native_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
diff --git a/src/3rdparty/v8/src/log.cc b/src/3rdparty/v8/src/log.cc
index d93a9d8..b049ffe 100644
--- a/src/3rdparty/v8/src/log.cc
+++ b/src/3rdparty/v8/src/log.cc
@@ -145,7 +145,7 @@ class Profiler: public Thread {
//
// StackTracer implementation
//
-void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
+DISABLE_ASAN void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
ASSERT(isolate->IsInitialized());
// Avoid collecting traces while doing GC.
@@ -526,6 +526,7 @@ Logger::Logger()
name_buffer_(new NameBuffer),
address_to_name_map_(NULL),
is_initialized_(false),
+ code_event_handler_(NULL),
last_address_(NULL),
prev_sp_(NULL),
prev_function_(NULL),
@@ -541,6 +542,52 @@ Logger::~Logger() {
}
+void Logger::IssueCodeAddedEvent(Code* code,
+ const char* name,
+ size_t name_len) {
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instruction_start();
+ event.code_len = code->instruction_size();
+ event.name.str = name;
+ event.name.len = name_len;
+
+ code_event_handler_(&event);
+}
+
+
+void Logger::IssueCodeMovedEvent(Address from, Address to) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_MOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ // Calculate the header size.
+ const size_t header_size =
+ from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+
+ // Calculate the new start address of the instructions.
+ event.new_code_start =
+ reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
+
+ code_event_handler_(&event);
+}
+
+
+void Logger::IssueCodeRemovedEvent(Address from) {
+ Code* from_code = Code::cast(HeapObject::FromAddress(from));
+
+ JitCodeEvent event;
+ event.type = JitCodeEvent::CODE_REMOVED;
+ event.code_start = from_code->instruction_start();
+ event.code_len = from_code->instruction_size();
+
+ code_event_handler_(&event);
+}
+
+
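
The three Issue*Event methods populate a JitCodeEvent and hand it to the registered handler. A hedged sketch of what a handler on the consuming side might do, keeping an address-to-name map in sync (struct layout simplified to the fields used above; the handler name is hypothetical):

    #include <cstddef>
    #include <map>
    #include <string>

    // Simplified mirror of the JitCodeEvent fields used in log.cc.
    struct JitCodeEvent {
      enum Type { CODE_ADDED, CODE_MOVED, CODE_REMOVED } type;
      const void* code_start;
      size_t code_len;
      const void* new_code_start;  // only meaningful for CODE_MOVED
      struct { const char* str; size_t len; } name;
    };

    // A profiler-style consumer: track where each named code object lives.
    static std::map<const void*, std::string> g_code_map;

    void HandleJitEvent(const JitCodeEvent* event) {
      switch (event->type) {
        case JitCodeEvent::CODE_ADDED:
          g_code_map[event->code_start] =
              std::string(event->name.str, event->name.len);
          break;
        case JitCodeEvent::CODE_MOVED: {
          std::map<const void*, std::string>::iterator it =
              g_code_map.find(event->code_start);
          if (it != g_code_map.end()) {
            g_code_map[event->new_code_start] = it->second;
            g_code_map.erase(it);
          }
          break;
        }
        case JitCodeEvent::CODE_REMOVED:
          g_code_map.erase(event->code_start);
          break;
      }
    }

    int main() {
      char buf[16];
      JitCodeEvent added = {JitCodeEvent::CODE_ADDED, buf, sizeof(buf),
                            0, {"foo", 3}};
      HandleJitEvent(&added);
      return g_code_map.count(buf) == 1 ? 0 : 1;
    }
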
#define DECLARE_EVENT(ignore1, name) name,
static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
@@ -864,13 +911,17 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendBytes(comment);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -899,13 +950,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendString(name);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -940,14 +995,18 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendBytes(ComputeMarker(code));
name_buffer_->AppendString(name);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -981,8 +1040,8 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
@@ -993,6 +1052,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(line);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1022,13 +1085,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[tag]);
name_buffer_->AppendByte(':');
name_buffer_->AppendInt(args_count);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1055,13 +1122,17 @@ void Logger::CodeMovingGCEvent() {
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
- if (!log_->IsEnabled()) return;
- if (FLAG_ll_prof || Serializer::enabled()) {
+ if (!is_logging_code_events()) return;
+ if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) {
name_buffer_->Reset();
name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
name_buffer_->AppendByte(':');
name_buffer_->AppendString(source);
}
+ if (code_event_handler_ != NULL) {
+ IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ }
+ if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
}
@@ -1083,6 +1154,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
+ if (code_event_handler_ != NULL) IssueCodeMovedEvent(from, to);
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@@ -1093,6 +1165,7 @@ void Logger::CodeMoveEvent(Address from, Address to) {
void Logger::CodeDeleteEvent(Address from) {
+ if (code_event_handler_ != NULL) IssueCodeRemovedEvent(from);
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@@ -1392,7 +1465,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
void Logger::LogCodeObject(Object* object) {
- if (FLAG_log_code || FLAG_ll_prof) {
+ if (FLAG_log_code || FLAG_ll_prof || is_logging_code_events()) {
Code* code_object = Code::cast(object);
LogEventsAndTags tag = Logger::STUB_TAG;
const char* description = "Unknown code from the snapshot";
@@ -1676,6 +1749,18 @@ bool Logger::SetUp() {
}
+void Logger::SetCodeEventHandler(uint32_t options,
+ JitCodeEventHandler event_handler) {
+ code_event_handler_ = event_handler;
+
+ if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
+ HandleScope scope;
+ LogCodeObjects();
+ LogCompiledFunctions();
+ }
+}
+
+
Sampler* Logger::sampler() {
return ticker_;
}
diff --git a/src/3rdparty/v8/src/log.h b/src/3rdparty/v8/src/log.h
index 03c7b3b..33f359a 100644
--- a/src/3rdparty/v8/src/log.h
+++ b/src/3rdparty/v8/src/log.h
@@ -86,6 +86,15 @@ class Ticker;
logger->Call; \
} while (false)
+#define LOG_CODE_EVENT(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = \
+ (isolate)->logger(); \
+ if (logger->is_logging_code_events()) \
+ logger->Call; \
+ } while (false)
+
+
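
LOG_CODE_EVENT wraps its body in do { ... } while (false), the standard idiom for making a multi-statement macro parse as one statement. A minimal illustration of why the wrapper matters:

    #include <cstdio>

    // Unsafe: expands to two statements, which breaks if/else nesting.
    #define LOG_BAD(msg) printf("%s", msg); fflush(stdout)

    // Safe: the do/while(false) makes the expansion a single statement
    // that still requires the trailing semicolon at the call site.
    #define LOG_GOOD(msg)   \
      do {                  \
        printf("%s", msg);  \
        fflush(stdout);     \
      } while (false)

    int main(int argc, char**) {
      if (argc > 1)
        LOG_GOOD("has args\n");  // fine
      else
        LOG_GOOD("no args\n");   // also fine; LOG_BAD here would not compile
      return 0;
    }
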
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
V(CODE_MOVE_EVENT, "code-move") \
@@ -151,6 +160,10 @@ class Logger {
// Acquires resources for logging if the right flags are set.
bool SetUp();
+ // Sets the current code event handler.
+ void SetCodeEventHandler(uint32_t options,
+ JitCodeEventHandler event_handler);
+
void EnsureTickerStarted();
void EnsureTickerStopped();
@@ -274,6 +287,10 @@ class Logger {
return logging_nesting_ > 0;
}
+ bool is_logging_code_events() {
+ return is_logging() || code_event_handler_ != NULL;
+ }
+
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
@@ -312,6 +329,11 @@ class Logger {
Logger();
~Logger();
+ // Issue code notifications.
+ void IssueCodeAddedEvent(Code* code, const char* name, size_t name_len);
+ void IssueCodeMovedEvent(Address from, Address to);
+ void IssueCodeRemovedEvent(Address from);
+
// Emits the profiler's first message.
void ProfilerBeginEvent();
@@ -413,6 +435,9 @@ class Logger {
// 'true' between SetUp() and TearDown().
bool is_initialized_;
+ // The code event handler - if any.
+ JitCodeEventHandler code_event_handler_;
+
// Support for 'incremental addresses' in compressed logs:
// LogMessageBuilder::AppendAddress(Address addr)
Address last_address_;
diff --git a/src/3rdparty/v8/src/mark-compact-inl.h b/src/3rdparty/v8/src/mark-compact-inl.h
index 2f7e31f..10773e7 100644
--- a/src/3rdparty/v8/src/mark-compact-inl.h
+++ b/src/3rdparty/v8/src/mark-compact-inl.h
@@ -52,32 +52,15 @@ void MarkCompactCollector::SetFlags(int flags) {
}
-bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
- if (MarkObjectWithoutPush(obj)) {
- marking_deque_.PushBlack(obj);
- return true;
- }
- return false;
-}
-
-
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- ProcessNewlyMarkedObject(obj);
- }
-}
-
-
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- if (!mark_bit.Get()) {
- SetMark(obj, mark_bit);
- return true;
+ ASSERT(IsMarked(obj));
+ ASSERT(HEAP->Contains(obj));
+ marking_deque_.PushBlack(obj);
}
- return false;
}
@@ -86,9 +69,6 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
- if (obj->IsMap()) {
- heap_->ClearCacheOnMap(Map::cast(obj));
- }
}
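
With MarkObjectAndPush/MarkObjectWithoutPush gone, MarkObject is the single place where an object's mark bit is set and the object is pushed onto the marking deque. The underlying mark-and-push worklist discipline, sketched on a toy object graph:

    #include <cassert>
    #include <vector>

    struct Obj {
      bool marked;
      std::vector<Obj*> children;
      Obj() : marked(false) {}
    };

    // Mark-and-push: an object enters the worklist exactly once, when its
    // mark bit flips, so the loop terminates even on cyclic graphs.
    void Mark(Obj* root) {
      std::vector<Obj*> deque;
      if (!root->marked) { root->marked = true; deque.push_back(root); }
      while (!deque.empty()) {
        Obj* obj = deque.back();
        deque.pop_back();
        for (size_t i = 0; i < obj->children.size(); i++) {
          Obj* child = obj->children[i];
          if (!child->marked) { child->marked = true; deque.push_back(child); }
        }
      }
    }

    int main() {
      Obj a, b;
      a.children.push_back(&b);
      b.children.push_back(&a);  // cycle
      Mark(&a);
      assert(a.marked && b.marked);
      return 0;
    }
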
diff --git a/src/3rdparty/v8/src/mark-compact.cc b/src/3rdparty/v8/src/mark-compact.cc
index 82fc1fc..7040728 100644
--- a/src/3rdparty/v8/src/mark-compact.cc
+++ b/src/3rdparty/v8/src/mark-compact.cc
@@ -62,25 +62,24 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
sweep_precisely_(false),
reduce_memory_footprint_(false),
abort_incremental_marking_(false),
+ marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
- flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL),
- marker_(this, this) { }
+ encountered_weak_maps_(NULL) { }
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+ CHECK(HEAP->mark_compact_collector()->IsMarked(object));
}
}
}
@@ -97,7 +96,7 @@ static void VerifyMarking(Address bottom, Address top) {
current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
+ CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
}
@@ -110,12 +109,12 @@ static void VerifyMarking(NewSpace* space) {
NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page, which allows us to use
// page->area_start() as the start of the range on all pages.
- ASSERT_EQ(space->bottom(),
+ CHECK_EQ(space->bottom(),
NewSpacePage::FromAddress(space->bottom())->area_start());
while (it.has_next()) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
- ASSERT(limit == end || !page->Contains(end));
+ CHECK(limit == end || !page->Contains(end));
VerifyMarking(page->area_start(), limit);
}
}
@@ -175,7 +174,7 @@ static void VerifyEvacuation(Address bottom, Address top) {
current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
+ CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
}
@@ -191,7 +190,7 @@ static void VerifyEvacuation(NewSpace* space) {
NewSpacePage* page = it.next();
Address current = page->area_start();
Address limit = it.has_next() ? page->area_end() : space->top();
- ASSERT(limit == space->top() || !page->Contains(space->top()));
+ CHECK(limit == space->top() || !page->Contains(space->top()));
while (current < limit) {
HeapObject* object = HeapObject::FromAddress(current);
object->Iterate(&visitor);
@@ -223,6 +222,101 @@ static void VerifyEvacuation(Heap* heap) {
VerifyEvacuationVisitor visitor;
heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
+#endif // VERIFY_HEAP
+
+
+#ifdef DEBUG
+class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
+ public:
+ VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ if (object->IsString()) continue;
+ switch (object->map()->instance_type()) {
+ case JS_FUNCTION_TYPE:
+ CheckContext(JSFunction::cast(object)->context());
+ break;
+ case JS_GLOBAL_PROXY_TYPE:
+ CheckContext(JSGlobalProxy::cast(object)->native_context());
+ break;
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ CheckContext(GlobalObject::cast(object)->native_context());
+ break;
+ case JS_ARRAY_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_REGEXP_TYPE:
+ VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
+ break;
+ case MAP_TYPE:
+ VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
+ VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
+ break;
+ case FIXED_ARRAY_TYPE:
+ if (object->IsContext()) {
+ CheckContext(object);
+ } else {
+ FixedArray* array = FixedArray::cast(object);
+ int length = array->length();
+ // Set array length to zero to prevent cycles while iterating
+ // over array bodies; this is easier than intrusive marking.
+ array->set_length(0);
+ array->IterateBody(
+ FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
+ array->set_length(length);
+ }
+ break;
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ case JS_PROXY_TYPE:
+ case JS_VALUE_TYPE:
+ case TYPE_FEEDBACK_INFO_TYPE:
+ object->Iterate(this);
+ break;
+ case ACCESSOR_INFO_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
+ case CODE_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case INTERCEPTOR_INFO_TYPE:
+ case ODDBALL_TYPE:
+ case SCRIPT_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ private:
+ void CheckContext(Object* context) {
+ if (!context->IsContext()) return;
+ Context* native_context = Context::cast(context)->native_context();
+ if (current_native_context_ == NULL) {
+ current_native_context_ = native_context;
+ } else {
+ CHECK_EQ(current_native_context_, native_context);
+ }
+ }
+
+ Context* current_native_context_;
+};
+
+
+static void VerifyNativeContextSeparation(Heap* heap) {
+ HeapObjectIterator it(heap->code_space());
+
+ for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+ VerifyNativeContextSeparationVisitor visitor;
+ Code::cast(object)->CodeIterateBody(&visitor);
+ }
+}
#endif
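
The FIXED_ARRAY_TYPE case in the visitor above pins the array length to zero during iteration so that re-entering the same array terminates immediately, then restores it. The same cycle-breaking trick in isolation:

    #include <cassert>

    struct FixedArrayLike {
      int length;
      FixedArrayLike** elements;  // may point back at containers of this array
    };

    // Visit while length is pinned to zero: any re-entry sees an empty array,
    // so cycles terminate without a separate visited-set.
    void VisitBody(FixedArrayLike* array) {
      int saved_length = array->length;
      array->length = 0;
      for (int i = 0; i < saved_length; i++) {
        if (array->elements[i] != 0) VisitBody(array->elements[i]);
      }
      array->length = saved_length;
    }

    int main() {
      FixedArrayLike a = {0, 0}, b = {0, 0};
      FixedArrayLike* a_elems[] = {&b};
      FixedArrayLike* b_elems[] = {&a};  // a <-> b cycle
      a.length = 1; a.elements = a_elems;
      b.length = 1; b.elements = b_elems;
      VisitBody(&a);                     // terminates despite the cycle
      assert(a.length == 1 && b.length == 1);
      return 0;
    }
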
@@ -248,10 +342,17 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
if (!compacting_) {
ASSERT(evacuation_candidates_.length() == 0);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ // If GDBJIT interface is active disable compaction.
+ if (FLAG_gdbjit) return false;
+#endif
+
CollectEvacuationCandidates(heap()->old_pointer_space());
CollectEvacuationCandidates(heap()->old_data_space());
- if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
+ if (FLAG_compact_code_space &&
+ (mode == NON_INCREMENTAL_COMPACTION ||
+ FLAG_incremental_code_compaction)) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
@@ -286,7 +387,7 @@ void MarkCompactCollector::CollectGarbage() {
ClearWeakMaps();
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
}
@@ -296,13 +397,26 @@ void MarkCompactCollector::CollectGarbage() {
if (!FLAG_collect_maps) ReattachInitialMaps();
+#ifdef DEBUG
+ if (FLAG_verify_native_context_separation) {
+ VerifyNativeContextSeparation(heap_);
+ }
+#endif
+
Finish();
+ if (marking_parity_ == EVEN_MARKING_PARITY) {
+ marking_parity_ = ODD_MARKING_PARITY;
+ } else {
+ ASSERT(marking_parity_ == ODD_MARKING_PARITY);
+ marking_parity_ = EVEN_MARKING_PARITY;
+ }
+
tracer_ = NULL;
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
PageIterator it(space);
@@ -313,6 +427,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
}
}
+
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
NewSpacePageIterator it(space->bottom(), space->top());
@@ -323,6 +438,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
}
+
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_pointer_space());
VerifyMarkbitsAreClean(heap_->old_data_space());
@@ -334,11 +450,11 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsWhite(mark_bit));
- ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
-#endif
+#endif // VERIFY_HEAP
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
@@ -500,12 +616,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
space->identity() == OLD_DATA_SPACE ||
space->identity() == CODE_SPACE);
+ static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
-
- const int kMaxMaxEvacuationCandidates = 1000;
- int max_evacuation_candidates = Min(
- kMaxMaxEvacuationCandidates,
- static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+ int max_evacuation_candidates =
+ static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
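
The new cap grows with the square root of the page count, max = sqrt(N / 2) + 1, and is clamped to kMaxMaxEvacuationCandidates (1000) a few lines below. A worked sketch of the arithmetic:

    #include <algorithm>
    #include <cassert>
    #include <cmath>

    // Mirrors the candidate cap: sqrt(number_of_pages / 2.0) + 1, clamped.
    int MaxEvacuationCandidates(int number_of_pages) {
      int max = static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
      return std::min(1000, max);
    }

    int main() {
      assert(MaxEvacuationCandidates(800) == 21);         // sqrt(400) + 1
      assert(MaxEvacuationCandidates(2) == 2);            // sqrt(1) + 1
      assert(MaxEvacuationCandidates(10000000) == 1000);  // clamped at the max
      return 0;
    }
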
@@ -535,25 +649,37 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
intptr_t over_reserved = reserved - space->SizeOfObjects();
static const intptr_t kFreenessThreshold = 50;
- if (over_reserved >= 2 * space->AreaSize() &&
- reduce_memory_footprint_) {
+ if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
+ // If reduction of memory footprint was requested, we are aggressive
+ // about choosing pages to free. We expect that half-empty pages
+ // are easier to compact, so we slightly bump the limit.
mode = REDUCE_MEMORY_FOOTPRINT;
-
- // We expect that empty pages are easier to compact so slightly bump the
- // limit.
max_evacuation_candidates += 2;
+ }
- if (FLAG_trace_fragmentation) {
- PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<int>(kFreenessThreshold));
- }
+
+ if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
+ // If over-usage is very high (more than a third of the space), we
+ // try to free all mostly empty pages. We expect that almost empty
+ // pages are even easier to compact, so we bump the limit even more.
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates *= 2;
+ }
+
+ if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
+ static_cast<double>(over_reserved) / MB,
+ static_cast<double>(reserved) / MB,
+ static_cast<int>(kFreenessThreshold));
}
intptr_t estimated_release = 0;
Candidate candidates[kMaxMaxEvacuationCandidates];
+ max_evacuation_candidates =
+ Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
int count = 0;
int fragmentation = 0;
Candidate* least = NULL;
@@ -566,7 +692,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
p->ClearEvacuationCandidate();
if (FLAG_stress_compaction) {
- int counter = space->heap()->ms_count();
+ unsigned int counter = space->heap()->ms_count();
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
@@ -658,12 +784,6 @@ void MarkCompactCollector::AbortCompaction() {
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
- // Monomorphic ICs are preserved when possible, but need to be flushed
- // when they might be keeping a Context alive, or when the heap is about
- // to be serialized.
- flush_monomorphic_ics_ =
- heap()->isolate()->context_exit_happened() || Serializer::enabled();
-
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
@@ -675,13 +795,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit) {
- // If GDBJIT interface is active disable compaction.
- compacting_collection_ = false;
- }
-#endif
-
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
@@ -703,7 +816,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
space->PrepareForMarkCompact();
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (!was_marked_incrementally_ && FLAG_verify_heap) {
VerifyMarkbitsAreClean();
}
@@ -754,133 +867,140 @@ void MarkCompactCollector::Finish() {
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL) {}
+void CodeFlusher::ProcessJSFunctionCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Object* undefined = isolate_->heap()->undefined_value();
- void AddCandidate(SharedFunctionInfo* shared_info) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ ClearNextCandidate(candidate, undefined);
- void AddCandidate(JSFunction* function) {
- ASSERT(function->code() == function->shared()->code());
+ SharedFunctionInfo* shared = candidate->shared();
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
+ Code* code = shared->code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ shared->set_code(lazy_compile);
+ candidate->set_code(lazy_compile);
+ } else if (code == lazy_compile) {
+ candidate->set_code(lazy_compile);
+ }
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
+ // We are in the middle of a GC cycle so the write barrier in the code
+ // setter did not record the slot update and we have to do that manually.
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+ isolate_->heap()->mark_compact_collector()->
+ RecordCodeEntrySlot(slot, target);
- private:
- void ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Object** shared_code_slot =
+ HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
+ candidate = next_candidate;
+ }
- SharedFunctionInfo* shared = candidate->shared();
+ jsfunction_candidates_head_ = NULL;
+}
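
ProcessJSFunctionCandidates above walks an intrusive list threaded through the functions themselves: read the next link, clear it, then either downgrade a function whose code object went unmarked to the lazy-compile stub or leave a live one alone. A simplified stand-in with hypothetical types (no mark bitmap, write barriers, or slot recording):

#include <cstdio>

struct Code { bool marked; const char* name; };

Code lazy_compile = { true, "lazy-compile stub" };

struct Function {
  Code* code;
  Function* next_candidate;  // link threaded through the object itself
};

Function* candidates_head = 0;

void AddCandidate(Function* f) {
  f->next_candidate = candidates_head;
  candidates_head = f;
}

void ProcessCandidates() {
  Function* candidate = candidates_head;
  while (candidate != 0) {
    Function* next = candidate->next_candidate;
    candidate->next_candidate = 0;  // clear the link as we go
    if (!candidate->code->marked) {
      // Nothing kept the compiled code alive: fall back to lazy compile.
      candidate->code = &lazy_compile;
    }
    candidate = next;
  }
  candidates_head = 0;
}

int main() {
  Code live = { true, "live code" };
  Code dead = { false, "dead code" };
  Function f1 = { &live, 0 }, f2 = { &dead, 0 };
  AddCandidate(&f1);
  AddCandidate(&f2);
  ProcessCandidates();
  std::printf("f1 -> %s, f2 -> %s\n", f1.code->name, f2.code->name);
  return 0;
}
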
- Code* code = shared->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(shared->code());
- }
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->
- RecordCodeEntrySlot(slot, target);
+void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
+ Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
- RecordSharedFunctionInfoCodeSlot(shared);
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ ClearNextCandidate(candidate);
- candidate = next_candidate;
+ Code* code = candidate->code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ if (!code_mark.Get()) {
+ candidate->set_code(lazy_compile);
}
- jsfunction_candidates_head_ = NULL;
+ Object** code_slot =
+ HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(code_slot, code_slot, *code_slot);
+
+ candidate = next_candidate;
}
+ shared_function_info_candidates_head_ = NULL;
+}
+
- void ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+void CodeFlusher::EvictCandidate(JSFunction* function) {
+ ASSERT(!function->next_function_link()->IsUndefined());
+ Object* undefined = isolate_->heap()->undefined_value();
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ if (candidate == function) {
+ next_candidate = GetNextCandidate(function);
+ jsfunction_candidates_head_ = next_candidate;
+ ClearNextCandidate(function, undefined);
+ } else {
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
- SetNextCandidate(candidate, NULL);
- Code* code = candidate->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- candidate->set_code(lazy_compile);
+ if (next_candidate == function) {
+ next_candidate = GetNextCandidate(function);
+ SetNextCandidate(candidate, next_candidate);
+ ClearNextCandidate(function, undefined);
}
- RecordSharedFunctionInfoCodeSlot(candidate);
-
candidate = next_candidate;
}
-
- shared_function_info_candidates_head_ = NULL;
}
+}
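
EvictCandidate above handles the two usual cases for unlinking from a singly-linked list: the head itself, or an interior node found by walking the predecessors. In sketch form (the same stand-in Function idea, repeated so the snippet stands alone):

#include <cstdio>

struct Function {
  const char* name;
  Function* next_candidate;
};

Function* head = 0;

void Evict(Function* f) {
  if (head == f) {                  // case 1: evict the head
    head = f->next_candidate;
    f->next_candidate = 0;
    return;
  }
  for (Function* c = head; c != 0; c = c->next_candidate) {
    if (c->next_candidate == f) {   // case 2: splice out an inner node
      c->next_candidate = f->next_candidate;
      f->next_candidate = 0;
      return;
    }
  }
}

int main() {
  Function a = { "a", 0 }, b = { "b", 0 }, c = { "c", 0 };
  // Build the list c -> b -> a.
  b.next_candidate = &a;
  c.next_candidate = &b;
  head = &c;
  Evict(&b);
  for (Function* f = head; f != 0; f = f->next_candidate)
    std::printf("%s ", f->name);    // prints: c a
  std::printf("\n");
  return 0;
}
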
- void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
- Object** slot = HeapObject::RawField(shared,
- SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(slot, slot, HeapObject::cast(*slot));
- }
- static JSFunction** GetNextCandidateField(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- candidate->address() + JSFunction::kCodeEntryOffset);
- }
+void CodeFlusher::EvictJSFunctionCandidates() {
+ Object* undefined = isolate_->heap()->undefined_value();
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- return *GetNextCandidateField(candidate);
+ JSFunction* candidate = jsfunction_candidates_head_;
+ JSFunction* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ ClearNextCandidate(candidate, undefined);
+ candidate = next_candidate;
}
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
- }
+ jsfunction_candidates_head_ = NULL;
+}
- static SharedFunctionInfo** GetNextCandidateField(
- SharedFunctionInfo* candidate) {
- Code* code = candidate->code();
- return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kGCMetadataOffset);
- }
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- return reinterpret_cast<SharedFunctionInfo*>(
- candidate->code()->gc_metadata());
+void CodeFlusher::EvictSharedFunctionInfoCandidates() {
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+ ClearNextCandidate(candidate);
+ candidate = next_candidate;
}
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
+ shared_function_info_candidates_head_ = NULL;
+}
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
+ Heap* heap = isolate_->heap();
+
+ JSFunction** slot = &jsfunction_candidates_head_;
+ JSFunction* candidate = jsfunction_candidates_head_;
+ while (candidate != NULL) {
+ if (heap->InFromSpace(candidate)) {
+ v->VisitPointer(reinterpret_cast<Object**>(slot));
+ }
+ candidate = GetNextCandidate(*slot);
+ slot = GetNextCandidateSlot(*slot);
+ }
+}
MarkCompactCollector::~MarkCompactCollector() {
@@ -927,81 +1047,24 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
-class StaticMarkingVisitor : public StaticVisitorBase {
+class MarkCompactMarkingVisitor
+ : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static inline void IterateBody(Map* map, HeapObject* obj) {
- table_.GetVisitor(map)(map, obj);
- }
-
- static void Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticMarkingVisitor,
- ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticMarkingVisitor,
- SlicedString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticMarkingVisitor,
- FixedArray::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitGlobalContext, &VisitGlobalContext);
+ static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
+ Map* map, HeapObject* obj);
- table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+ static void ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type);
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
-
- table_.Register(kVisitOddball,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Oddball::BodyDescriptor,
- void>::Visit);
- table_.Register(kVisitMap,
- &FixedBodyVisitor<StaticMarkingVisitor,
- Map::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo,
- &VisitSharedFunctionInfoAndFlushCode);
-
- table_.Register(kVisitJSFunction,
- &VisitJSFunctionAndFlushCode);
-
- table_.Register(kVisitJSRegExp,
- &VisitRegExpAndFlushCode);
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticMarkingVisitor,
- JSGlobalPropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
+ template<MarkCompactMarkingVisitor::VisitorId id>
+ class ObjectStatsTracker {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj);
+ };
- table_.RegisterSpecializations<StructObjectVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
- }
+ static void Initialize();
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
MarkObjectByPointer(heap->mark_compact_collector(), p, p);
@@ -1020,48 +1083,21 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
}
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
- MarkBit mark = Marking::MarkBitFrom(cell);
- heap->mark_compact_collector()->MarkObject(cell, mark);
- }
-
- static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- // TODO(mstarzinger): We do not short-circuit cons strings here, verify
- // that there can be no such embedded pointers and add assertion here.
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ // Marks the object black and pushes it on the marking stack.
+ INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
MarkBit mark = Marking::MarkBitFrom(object);
heap->mark_compact_collector()->MarkObject(object, mark);
}
- static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC ||
- heap->mark_compact_collector()->flush_monomorphic_ics_ ||
- target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ // Marks the object black without pushing it on the marking stack.
+ // Returns true if object needed marking and false otherwise.
+ INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ if (!mark_bit.Get()) {
+ heap->mark_compact_collector()->SetMark(object, mark_bit);
+ return true;
}
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- }
-
- static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ return false;
}
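
MarkObjectWithoutPush above is a test-and-set on the mark bit: it returns true only when this call actually transitions the object from unmarked to marked, which is what lets callers decide whether further scanning is needed. A tiny sketch of those semantics, with a plain bool standing in for the mark bitmap:

#include <cstdio>

struct HeapObject { bool mark_bit; };

// Returns true if the object needed marking, false if already black.
bool MarkWithoutPush(HeapObject* object) {
  if (!object->mark_bit) {
    object->mark_bit = true;
    return true;
  }
  return false;
}

int main() {
  HeapObject o = { false };
  std::printf("%d\n", MarkWithoutPush(&o) ? 1 : 0);  // 1: we marked it
  std::printf("%d\n", MarkWithoutPush(&o) ? 1 : 0);  // 0: already marked
  return 0;
}
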
// Mark object pointed to by p.
@@ -1116,28 +1152,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
return true;
}
- static inline void VisitExternalReference(Address* p) { }
- static inline void VisitExternalReference(RelocInfo* rinfo) { }
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
-
- private:
- class DataObjectVisitor {
- public:
- template<int size>
- static void VisitSpecialized(Map* map, HeapObject* object) {
- }
-
- static void Visit(Map* map, HeapObject* object) {
- }
- };
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- JSObject::BodyDescriptor,
- void> JSObjectVisitor;
-
- typedef FlexibleBodyVisitor<StaticMarkingVisitor,
- StructBodyDescriptor,
- void> StructObjectVisitor;
+ INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ shared->BeforeVisitingPointers();
+ }
static void VisitJSWeakMap(Map* map, HeapObject* object) {
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
@@ -1151,12 +1169,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip visiting the backing hash table containing the mappings.
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+ BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
JSWeakMap::BodyDescriptor::kStartOffset,
JSWeakMap::kTableOffset);
- BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
+ BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
map->GetHeap(),
object,
JSWeakMap::kTableOffset + kPointerSize,
@@ -1176,136 +1194,14 @@ class StaticMarkingVisitor : public StaticVisitorBase {
ASSERT(MarkCompactCollector::IsMarked(table->map()));
}
- static void VisitCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- Code* code = reinterpret_cast<Code*>(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
- }
- code->CodeIterateBody<StaticMarkingVisitor>(heap);
- }
+ private:
+ template<int id>
+ static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
// Code flushing support.
- // How many collections a newly compiled code object will survive before
- // being flushed.
- static const int kCodeAgeThreshold = 5;
-
static const int kRegExpCodeThreshold = 5;
- inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
- }
-
-
- inline static bool IsCompiled(JSFunction* function) {
- return function->code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsCompiled(SharedFunctionInfo* function) {
- return function->code() !=
- function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
- }
-
- inline static bool IsFlushable(Heap* heap, JSFunction* function) {
- SharedFunctionInfo* shared_info = function->unchecked_shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(function->code());
- if (code_mark.Get()) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
- return false;
- }
-
- // We do not flush code for optimized functions.
- if (function->code() != shared_info->code()) {
- return false;
- }
-
- return IsFlushable(heap, shared_info);
- }
-
- inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- MarkBit code_mark =
- Marking::MarkBitFrom(shared_info->code());
- if (code_mark.Get()) {
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for Api functions.
- Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) {
- return false;
- }
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) {
- return false;
- }
-
- // If this is a full script wrapped in a function we do not flush the code.
- if (shared_info->is_toplevel()) {
- return false;
- }
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
-
- return true;
- }
-
-
- static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
- if (!IsFlushable(heap, function)) return false;
-
- // This function's code looks flushable. But we have to postpone the
- // decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would make the nonoptimized version of the code nonflushable,
- // because it is required for bailing out from optimized code.
- heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
- return true;
- }
-
- static inline bool IsValidNotBuiltinContext(Object* ctx) {
- return ctx->IsContext() &&
- !Context::cast(ctx)->global()->IsJSBuiltinsObject();
- }
-
-
- static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
-
- FixedBodyVisitor<StaticMarkingVisitor,
- SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
- }
-
-
static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
JSRegExp* re,
bool is_ascii) {
@@ -1368,7 +1264,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
- VisitJSRegExpFields(map, object);
+ VisitJSRegExp(map, object);
return;
}
JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
@@ -1376,183 +1272,161 @@ class StaticMarkingVisitor : public StaticVisitorBase {
UpdateRegExpCodeAgeAndFlush(heap, re, true);
UpdateRegExpCodeAgeAndFlush(heap, re, false);
// Visit the fields of the RegExp, including the updated FixedArray.
- VisitJSRegExpFields(map, object);
+ VisitJSRegExp(map, object);
}
+ static VisitorDispatchTable<Callback> non_count_table_;
+};
- static void VisitSharedFunctionInfoAndFlushCode(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitSharedFunctionInfoGeneric(map, object);
- return;
+void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
+ FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type) {
+ Heap* heap = fixed_array->map()->GetHeap();
+ if (fixed_array->map() != heap->fixed_cow_array_map() &&
+ fixed_array->map() != heap->fixed_double_array_map() &&
+ fixed_array != heap->empty_fixed_array()) {
+ if (fixed_array->IsDictionary()) {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ dictionary_type,
+ fixed_array->Size());
+ } else {
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ fast_type,
+ fixed_array->Size());
}
- VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
}
+}
- static void VisitSharedFunctionInfoAndFlushCodeGeneric(
- Map* map, HeapObject* object, bool known_flush_code_candidate) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
- if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
-
- if (!known_flush_code_candidate) {
- known_flush_code_candidate = IsFlushable(heap, shared);
- if (known_flush_code_candidate) {
- heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
- }
- }
-
- VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
+void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
+ MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ heap->RecordObjectStats(map->instance_type(), -1, object_size);
+ non_count_table_.GetVisitorById(id)(map, obj);
+ if (obj->IsJSObject()) {
+ JSObject* object = JSObject::cast(obj);
+ ObjectStatsCountFixedArray(object->elements(),
+ DICTIONARY_ELEMENTS_SUB_TYPE,
+ FAST_ELEMENTS_SUB_TYPE);
+ ObjectStatsCountFixedArray(object->properties(),
+ DICTIONARY_PROPERTIES_SUB_TYPE,
+ FAST_PROPERTIES_SUB_TYPE);
}
+}
- static void VisitCodeEntry(Heap* heap, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- MarkBit mark = Marking::MarkBitFrom(code);
- heap->mark_compact_collector()->MarkObject(code, mark);
- heap->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, code);
- }
-
- static void VisitGlobalContext(Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticMarkingVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
+template<MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
+ Map* map, HeapObject* obj) {
+ ObjectStatsVisitBase(id, map, obj);
+}
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
- }
- }
- static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitMap> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- VisitJSFunction(map, object);
- return;
- }
-
- JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
- // The function must have a valid context and not be a builtin.
- bool flush_code_candidate = false;
- if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
- flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
- }
-
- if (!flush_code_candidate) {
- Code* code = jsfunction->shared()->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- collector->MarkObject(code, code_mark);
-
- if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- collector->MarkInlinedFunctionsCode(jsfunction->code());
- }
- }
-
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- flush_code_candidate);
+ Map* map_obj = Map::cast(obj);
+ ASSERT(map->instance_type() == MAP_TYPE);
+ DescriptorArray* array = map_obj->instance_descriptors();
+ if (map_obj->owns_descriptors() &&
+ array != heap->empty_descriptor_array()) {
+ int fixed_array_size = array->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->HasTransitionArray()) {
+ int fixed_array_size = map_obj->transitions()->Size();
+ heap->RecordObjectStats(FIXED_ARRAY_TYPE,
+ TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->code_cache() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(map_obj->code_cache())->Size());
+ }
+ ObjectStatsVisitBase(kVisitMap, map, obj);
}
+};
- static void VisitJSFunction(Map* map, HeapObject* object) {
- VisitJSFunctionFields(map,
- reinterpret_cast<JSFunction*>(object),
- false);
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitCode> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ ASSERT(map->instance_type() == CODE_TYPE);
+ heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
+ ObjectStatsVisitBase(kVisitCode, map, obj);
}
+};
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
-
- static inline void VisitJSFunctionFields(Map* map,
- JSFunction* object,
- bool flush_code_candidate) {
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
-
- VisitPointers(heap,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
- if (!flush_code_candidate) {
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
- } else {
- // Don't visit code object.
-
- // Visit shared function info to avoid double checking of its
- // flushability.
- SharedFunctionInfo* shared_info = object->unchecked_shared();
- MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
- if (!shared_info_mark.Get()) {
- Map* shared_info_map = shared_info->map();
- MarkBit shared_info_map_mark =
- Marking::MarkBitFrom(shared_info_map);
- heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
- heap->mark_compact_collector()->MarkObject(shared_info_map,
- shared_info_map_mark);
- VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
- shared_info,
- true);
- }
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ if (sfi->scope_info() != heap->empty_fixed_array()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SCOPE_INFO_SUB_TYPE,
+ FixedArray::cast(sfi->scope_info())->Size());
}
-
- VisitPointers(
- heap,
- HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object,
- JSFunction::kNonWeakFieldsEndOffset));
+ ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
}
+};
- static inline void VisitJSRegExpFields(Map* map,
- HeapObject* object) {
- int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->inobject_properties();
- VisitPointers(map->GetHeap(),
- SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
- SLOT_ADDR(object, last_property_offset));
- }
-
-
- static void VisitSharedFunctionInfoFields(Heap* heap,
- HeapObject* object,
- bool flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
- if (!flush_code_candidate) {
- VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+template<>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+ MarkCompactMarkingVisitor::kVisitFixedArray> {
+ public:
+ static inline void Visit(Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ if (fixed_array == heap->symbol_table()) {
+ heap->RecordObjectStats(
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TABLE_SUB_TYPE,
+ fixed_array->Size());
}
-
- VisitPointers(heap,
- SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
- SLOT_ADDR(object, SharedFunctionInfo::kSize));
+ ObjectStatsVisitBase(kVisitFixedArray, map, obj);
}
+};
- #undef SLOT_ADDR
- typedef void (*Callback)(Map* map, HeapObject* object);
+void MarkCompactMarkingVisitor::Initialize() {
+ StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
- static VisitorDispatchTable<Callback> table_;
-};
+ table_.Register(kVisitJSRegExp,
+ &VisitRegExpAndFlushCode);
+
+ if (FLAG_track_gc_object_stats) {
+ // Copy the visitor table to make call-through possible.
+ non_count_table_.CopyFrom(&table_);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+ table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+ VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
+#undef VISITOR_ID_COUNT_FUNCTION
+ }
+}
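
Initialize above enables per-type object statistics by saving a copy of the visitor dispatch table and then overwriting the live entries with counting wrappers that record stats and call through to the saved originals. A self-contained sketch of that wrap-and-delegate pattern (hypothetical visitor ids and a bare function-pointer table, not V8's VisitorDispatchTable):

#include <cstdio>

typedef void (*Callback)(int object);

const int kNumIds = 2;
Callback table[kNumIds];            // live dispatch table
Callback non_count_table[kNumIds];  // saved originals for call-through
int visit_counts[kNumIds];

void VisitA(int object) { std::printf("visit A: %d\n", object); }
void VisitB(int object) { std::printf("visit B: %d\n", object); }

// One wrapper per id: bump the counter, then delegate to the original.
template <int id>
void CountingVisit(int object) {
  visit_counts[id]++;
  non_count_table[id](object);
}

void Initialize(bool track_stats) {
  table[0] = VisitA;
  table[1] = VisitB;
  if (track_stats) {
    // Copy the table first so the wrappers have something to call through to.
    for (int i = 0; i < kNumIds; i++) non_count_table[i] = table[i];
    table[0] = CountingVisit<0>;
    table[1] = CountingVisit<1>;
  }
}

int main() {
  Initialize(true);
  table[0](7);
  table[0](8);
  table[1](9);
  std::printf("A visited %d times, B visited %d times\n",
              visit_counts[0], visit_counts[1]);
  return 0;
}
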
-VisitorDispatchTable<StaticMarkingVisitor::Callback>
- StaticMarkingVisitor::table_;
+VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
+ MarkCompactMarkingVisitor::non_count_table_;
class MarkingVisitor : public ObjectVisitor {
@@ -1560,11 +1434,11 @@ class MarkingVisitor : public ObjectVisitor {
explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
void VisitPointer(Object** p) {
- StaticMarkingVisitor::VisitPointer(heap_, p);
+ MarkCompactMarkingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- StaticMarkingVisitor::VisitPointers(heap_, start, end);
+ MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
}
private:
@@ -1611,26 +1485,6 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
};
-void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
- // For optimized functions we should retain both the non-optimized version
- // of its code and the non-optimized versions of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-
- FixedArray* literals = data->LiteralArray();
-
- for (int i = 0, count = data->InlinedFunctionCount()->value();
- i < count;
- i++) {
- JSFunction* inlined = JSFunction::cast(literals->get(i));
- Code* inlined_code = inlined->shared()->code();
- MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
- MarkObject(inlined_code, inlined_code_mark);
- }
-}
-
-
void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
@@ -1643,7 +1497,8 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
- MarkInlinedFunctionsCode(frame->LookupCode());
+ MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
+ frame->LookupCode());
}
}
}
@@ -1652,21 +1507,13 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
- // TODO(1609) Currently incremental marker does not support code flushing.
- if (!FLAG_flush_code || was_marked_incrementally_) {
- EnableCodeFlushing(false);
- return;
+ // Enable code flushing for non-incremental cycles.
+ if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+ EnableCodeFlushing(!was_marked_incrementally_);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
- EnableCodeFlushing(false);
- return;
- }
-#endif
-
- EnableCodeFlushing(true);
+ // If code flushing is disabled, there is no need to prepare for it.
+ if (!is_code_flushing_enabled()) return;
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
@@ -1723,7 +1570,7 @@ class RootMarkingVisitor : public ObjectVisitor {
// Mark the map pointer and body, and push them on the marking stack.
MarkBit map_mark = Marking::MarkBitFrom(map);
collector_->MarkObject(map, map_mark);
- StaticMarkingVisitor::IterateBody(map, object);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
@@ -1786,151 +1633,6 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
};
-void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
- ASSERT(IsMarked(object));
- ASSERT(HEAP->Contains(object));
- if (object->IsMap()) {
- Map* map = Map::cast(object);
- heap_->ClearCacheOnMap(map);
-
- // When map collection is enabled we have to mark through map's transitions
- // in a special way to make transition links weak. Only maps for subclasses
- // of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- marker_.MarkMapContents(map);
- } else {
- marking_deque_.PushBlack(map);
- }
- } else {
- marking_deque_.PushBlack(object);
- }
-}
-
-
-// Force instantiation of template instances.
-template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
-template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
-
-
-template <class T>
-void Marker<T>::MarkMapContents(Map* map) {
- // Mark prototype transitions array but don't push it into marking stack.
- // This will make references from it weak. We will clean dead prototype
- // transitions in ClearNonLiveTransitions.
- Object** proto_trans_slot =
- HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
- HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
- if (prototype_transitions->IsFixedArray()) {
- mark_compact_collector()->RecordSlot(proto_trans_slot,
- proto_trans_slot,
- prototype_transitions);
- MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
- if (!mark.Get()) {
- mark.Set();
- MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
- prototype_transitions->Size());
- }
- }
-
- // Make sure that the back pointer stored either in the map itself or inside
- // its prototype transitions array is marked. Treat pointers in the descriptor
- // array as weak and also mark that array to prevent visiting it later.
- base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
-
- Object** descriptor_array_slot =
- HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
- Object* descriptor_array = *descriptor_array_slot;
- if (!descriptor_array->IsSmi()) {
- MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
- }
-
- // Mark the Object* fields of the Map. Since the descriptor array has been
- // marked already, it is fine that one of these fields contains a pointer
- // to it. But make sure to skip back pointer and prototype transitions.
- STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
- Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
- Object** start_slot = HeapObject::RawField(
- map, Map::kPointerFieldsBeginOffset);
- Object** end_slot = HeapObject::RawField(
- map, Map::kPrototypeTransitionsOrBackPointerOffset);
- for (Object** slot = start_slot; slot < end_slot; slot++) {
- Object* obj = *slot;
- if (!obj->NonFailureIsHeapObject()) continue;
- mark_compact_collector()->RecordSlot(start_slot, slot, obj);
- base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
- }
-}
-
-
-template <class T>
-void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
- // Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
-
- // The DescriptorArray contains a pointer to its contents array, but the
- // contents array will be marked black and hence not be visited again.
- if (!base_marker()->MarkObjectAndPush(descriptors)) return;
- FixedArray* contents = FixedArray::cast(
- descriptors->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->length() >= 2);
- ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
- base_marker()->MarkObjectWithoutPush(contents);
-
- // Contents contains (value, details) pairs. If the descriptor contains a
- // transition (value is a Map), we don't mark the value as live. It might
- // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
- for (int i = 0; i < contents->length(); i += 2) {
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
-
- Object** slot = contents->data_start() + i;
- if (!(*slot)->IsHeapObject()) continue;
- HeapObject* value = HeapObject::cast(*slot);
-
- mark_compact_collector()->RecordSlot(slot, slot, *slot);
-
- switch (details.type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- base_marker()->MarkObjectAndPush(value);
- break;
- case CALLBACKS:
- if (!value->IsAccessorPair()) {
- base_marker()->MarkObjectAndPush(value);
- } else if (base_marker()->MarkObjectWithoutPush(value)) {
- AccessorPair* accessors = AccessorPair::cast(value);
- MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
- MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
- }
- break;
- case ELEMENTS_TRANSITION:
- // For maps with multiple elements transitions, the transition maps are
- // stored in a FixedArray. Keep the fixed array alive but not the maps
- // that it refers to.
- if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
- break;
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- break;
- }
- }
-}
-
-
-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- mark_compact_collector()->RecordSlot(slot, slot, accessor);
- base_marker()->MarkObjectAndPush(accessor);
-}
-
-
// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
@@ -2167,7 +1869,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
MarkBit map_mark = Marking::MarkBitFrom(map);
MarkObject(map, map_mark);
- StaticMarkingVisitor::IterateBody(map, object);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
}
// Process encountered weak maps, mark objects only reachable by those
@@ -2264,7 +1966,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// non-incremental marker can deal with them as if overflow
// occurred during normal marking.
// But incremental marker uses a separate marking deque
- // so we have to explicitly copy it's overflow state.
+ // so we have to explicitly copy its overflow state.
incremental_marking->Finalize();
incremental_marking_overflowed =
incremental_marking->marking_deque()->overflowed();
@@ -2306,7 +2008,7 @@ void MarkCompactCollector::MarkLiveObjects() {
ASSERT(cell->IsJSGlobalPropertyCell());
if (IsMarked(cell)) {
int offset = JSGlobalPropertyCell::kValueOffset;
- StaticMarkingVisitor::VisitPointer(
+ MarkCompactMarkingVisitor::VisitPointer(
heap(),
reinterpret_cast<Object**>(cell->address() + offset));
}
@@ -2373,17 +2075,26 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
+ // If the incremental marker does not support code flushing, we need to
+ // disable it before the incremental marking steps of the next cycle.
+ if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+ EnableCodeFlushing(false);
+ }
}
if (!FLAG_watch_ic_patching) {
// Clean up dead objects from the runtime profiler.
heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
}
+
+ if (FLAG_track_gc_object_stats) {
+ heap()->CheckpointObjectStats();
+ }
}
void MarkCompactCollector::ProcessMapCaches() {
- Object* raw_context = heap()->global_contexts_list_;
+ Object* raw_context = heap()->native_contexts_list_;
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
if (IsMarked(context)) {
@@ -2483,7 +2194,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->prototype_transitions();
+ FixedArray* prototype_transitions = map->GetPrototypeTransitions();
int new_number_of_transitions = 0;
const int header = Map::kProtoTransitionHeaderSize;
@@ -2561,7 +2272,8 @@ void MarkCompactCollector::ProcessWeakMaps() {
Object** value_slot =
HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
ObjectHashTable::EntryToValueIndex(i)));
- StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
+ MarkCompactMarkingVisitor::MarkObjectByPointer(
+ this, anchor, value_slot);
}
}
weak_map_obj = weak_map->next();
@@ -2675,15 +2387,33 @@ class PointersUpdatingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* target = rinfo->target_object();
+ Object* old_target = target;
VisitPointer(&target);
- rinfo->set_target_object(target);
+ // Avoid unnecessary changes that might unnecessarily flush the
+ // instruction cache.
+ if (target != old_target) {
+ rinfo->set_target_object(target);
+ }
}
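
The guard added above matters because set_target_object rewrites instruction bytes and may force an instruction-cache flush; comparing against the old value first makes pointer updating free for objects that did not move. The same compare-before-write shape recurs in the two visitors below. The pattern in isolation (hypothetical Relocation type, with a counter standing in for the flush cost):

#include <cstdio>

struct Relocation {
  void* target;
  int flushes;  // stands in for expensive instruction-cache maintenance
  void set_target(void* t) { target = t; flushes++; }
};

// Only write back (and pay for the flush) when the value actually changed.
void UpdateTarget(Relocation* rinfo, void* new_target) {
  if (new_target != rinfo->target) {
    rinfo->set_target(new_target);
  }
}

int main() {
  int a = 0, b = 0;
  Relocation r = { &a, 0 };
  UpdateTarget(&r, &a);  // unchanged object: no write, no flush
  UpdateTarget(&r, &b);  // moved object: one write, one flush
  std::printf("flushes: %d\n", r.flushes);
  return 0;
}
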
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* old_target = target;
VisitPointer(&target);
- rinfo->set_target_address(Code::cast(target)->instruction_start());
+ if (target != old_target) {
+ rinfo->set_target_address(Code::cast(target)->instruction_start());
+ }
+ }
+
+ void VisitCodeAgeSequence(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Object* stub = rinfo->code_age_stub();
+ ASSERT(stub != NULL);
+ VisitPointer(&stub);
+ if (stub != rinfo->code_age_stub()) {
+ rinfo->set_code_age_stub(Code::cast(stub));
+ }
}
void VisitDebugTarget(RelocInfo* rinfo) {
@@ -2739,7 +2469,9 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
// spurious newspace pointers in the old space.
- *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
+ // TODO(mstarzinger): This was changed to a sentinel value to track down
+ // rare crashes; change it back to Smi::FromInt(0) later.
+ *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
}
}
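
The TODO above replaces the plain zero zap value with a distinctive bit pattern, so that if a zapped slot is ever read again the value stands out in a crash dump and points straight at this code path (the >> 1 compensates for the Smi tag shift, so the stored word reads as the pattern). A minimal sketch of the idea with a hypothetical constant:

#include <cstdint>
#include <cstdio>

// A recognizable pattern (borrowed from the constant above) is far easier
// to spot in a crash dump than a slot zapped to zero.
const uintptr_t kZapValue = 0x0f100d00;

void ZapSlot(void** slot) {
  *slot = reinterpret_cast<void*>(kZapValue);
}

bool LooksZapped(void* p) {
  return reinterpret_cast<uintptr_t>(p) == kZapValue;
}

int main() {
  int object = 0;
  void* slot = &object;
  ZapSlot(&slot);
  std::printf("zapped: %d, value: %p\n", LooksZapped(slot) ? 1 : 0, slot);
  return 0;
}
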
@@ -3221,6 +2953,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ Heap::RelocationLock relocation_lock(heap());
+
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
@@ -3359,8 +3093,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update pointer from the global contexts list.
- updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+ // Update pointer from the native contexts list.
+ updating_visitor.VisitPointer(heap_->native_contexts_list_address());
heap_->symbol_table()->Iterate(&updating_visitor);
@@ -3383,7 +3117,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyEvacuation(heap_);
}
@@ -3795,11 +3529,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
bool lazy_sweeping_active = false;
bool unused_page_present = false;
- intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
- intptr_t space_left =
- Min(heap()->OldGenPromotionLimit(old_space_size),
- heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
-
while (it.has_next()) {
Page* p = it.next();
@@ -3859,7 +3588,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
}
freed_bytes += SweepConservatively(space, p);
pages_swept++;
- if (space_left + freed_bytes > newspace_size) {
+ if (freed_bytes > 2 * newspace_size) {
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
} else {
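
The new condition above drops the old promotion-limit arithmetic: pages are swept eagerly only until the freed bytes exceed twice the new-space size, and the remaining pages are handed to the lazy sweeper. A reduced model of that loop (made-up page sizes, not the V8 heap):

#include <cstdio>
#include <vector>

int main() {
  const int kNewSpaceSize = 100;
  std::vector<int> page_garbage;  // freeable bytes per page
  page_garbage.push_back(60);
  page_garbage.push_back(90);
  page_garbage.push_back(80);
  page_garbage.push_back(70);
  page_garbage.push_back(50);

  int freed_bytes = 0;
  size_t swept = 0;
  for (; swept < page_garbage.size(); swept++) {
    freed_bytes += page_garbage[swept];  // "sweep" this page now
    if (freed_bytes > 2 * kNewSpaceSize) {
      // Enough free memory for the next scavenge: defer the rest.
      swept++;
      break;
    }
  }
  std::printf("swept %u pages eagerly, %u left for lazy sweeping\n",
              static_cast<unsigned>(swept),
              static_cast<unsigned>(page_garbage.size() - swept));
  return 0;
}
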
@@ -3935,11 +3664,19 @@ void MarkCompactCollector::SweepSpaces() {
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (heap()->isolate()->debug()->IsLoaded() ||
+ heap()->isolate()->debug()->has_break_points()) {
+ enable = false;
+ }
+#endif
+
if (enable) {
if (code_flusher_ != NULL) return;
code_flusher_ = new CodeFlusher(heap()->isolate());
} else {
if (code_flusher_ == NULL) return;
+ code_flusher_->EvictAllCandidates();
delete code_flusher_;
code_flusher_ = NULL;
}
@@ -3963,7 +3700,8 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
void MarkCompactCollector::Initialize() {
- StaticMarkingVisitor::Initialize();
+ MarkCompactMarkingVisitor::Initialize();
+ IncrementalMarking::Initialize();
}
@@ -4039,6 +3777,20 @@ void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
}
+void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
+ ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
+ if (is_compacting()) {
+ Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
+ GcSafeFindCodeForInnerPointer(pc);
+ MarkBit mark_bit = Marking::MarkBitFrom(host);
+ if (Marking::IsBlack(mark_bit)) {
+ RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RecordRelocSlot(&rinfo, target);
+ }
+ }
+}
+
+
static inline SlotsBuffer::SlotType DecodeSlotType(
SlotsBuffer::ObjectSlot slot) {
return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
diff --git a/src/3rdparty/v8/src/mark-compact.h b/src/3rdparty/v8/src/mark-compact.h
index dbc2869..0a4c1ea 100644
--- a/src/3rdparty/v8/src/mark-compact.h
+++ b/src/3rdparty/v8/src/mark-compact.h
@@ -304,6 +304,26 @@ class SlotsBuffer {
NUMBER_OF_SLOT_TYPES
};
+ static const char* SlotTypeToString(SlotType type) {
+ switch (type) {
+ case EMBEDDED_OBJECT_SLOT:
+ return "EMBEDDED_OBJECT_SLOT";
+ case RELOCATED_CODE_OBJECT:
+ return "RELOCATED_CODE_OBJECT";
+ case CODE_TARGET_SLOT:
+ return "CODE_TARGET_SLOT";
+ case CODE_ENTRY_SLOT:
+ return "CODE_ENTRY_SLOT";
+ case DEBUG_TARGET_SLOT:
+ return "DEBUG_TARGET_SLOT";
+ case JS_RETURN_SLOT:
+ return "JS_RETURN_SLOT";
+ case NUMBER_OF_SLOT_TYPES:
+ return "NUMBER_OF_SLOT_TYPES";
+ }
+ return "UNKNOWN SlotType";
+ }
+
void UpdateSlots(Heap* heap);
void UpdateSlotsWithFilter(Heap* heap);
@@ -383,31 +403,96 @@ class SlotsBuffer {
};
-// -------------------------------------------------------------------------
-// Marker shared between incremental and non-incremental marking
-template<class BaseMarker> class Marker {
+// CodeFlusher collects candidates for code flushing during marking and
+// processes those candidates after marking has completed in order to
+// reset those functions referencing code objects that would otherwise
+// be unreachable. Code objects can be referenced in two ways:
+// - SharedFunctionInfo references unoptimized code.
+// - JSFunction references either unoptimized or optimized code.
+// We are not allowed to flush unoptimized code for functions that got
+// optimized or inlined into optimized code, because we might bail out
+// into the unoptimized code again during deoptimization.
+class CodeFlusher {
public:
- Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
- : base_marker_(base_marker),
- mark_compact_collector_(mark_compact_collector) {}
+ explicit CodeFlusher(Isolate* isolate)
+ : isolate_(isolate),
+ jsfunction_candidates_head_(NULL),
+ shared_function_info_candidates_head_(NULL) {}
+
+ void AddCandidate(SharedFunctionInfo* shared_info) {
+ if (GetNextCandidate(shared_info) == NULL) {
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
+ }
+ }
- // Mark pointers in a Map and its DescriptorArray together, possibly
- // treating transitions or back pointers weak.
- void MarkMapContents(Map* map);
- void MarkDescriptorArray(DescriptorArray* descriptors);
- void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
+ void AddCandidate(JSFunction* function) {
+ ASSERT(function->code() == function->shared()->code());
+ if (GetNextCandidate(function)->IsUndefined()) {
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
+ }
+ }
+
+ void EvictCandidate(JSFunction* function);
+
+ void ProcessCandidates() {
+ ProcessSharedFunctionInfoCandidates();
+ ProcessJSFunctionCandidates();
+ }
+
+ void EvictAllCandidates() {
+ EvictJSFunctionCandidates();
+ EvictSharedFunctionInfoCandidates();
+ }
+
+ void IteratePointersToFromSpace(ObjectVisitor* v);
private:
- BaseMarker* base_marker() {
- return base_marker_;
+ void ProcessJSFunctionCandidates();
+ void ProcessSharedFunctionInfoCandidates();
+ void EvictJSFunctionCandidates();
+ void EvictSharedFunctionInfoCandidates();
+
+ static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
+ return reinterpret_cast<JSFunction**>(
+ HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+ }
+
+ static JSFunction* GetNextCandidate(JSFunction* candidate) {
+ Object* next_candidate = candidate->next_function_link();
+ return reinterpret_cast<JSFunction*>(next_candidate);
+ }
+
+ static void SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate) {
+ candidate->set_next_function_link(next_candidate);
}
- MarkCompactCollector* mark_compact_collector() {
- return mark_compact_collector_;
+ static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+ ASSERT(undefined->IsUndefined());
+ candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
- BaseMarker* base_marker_;
- MarkCompactCollector* mark_compact_collector_;
+ static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+ Object* next_candidate = candidate->code()->gc_metadata();
+ return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+ }
+
+ static void SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate) {
+ candidate->code()->set_gc_metadata(next_candidate);
+ }
+
+ static void ClearNextCandidate(SharedFunctionInfo* candidate) {
+ candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+ }
+
+ Isolate* isolate_;
+ JSFunction* jsfunction_candidates_head_;
+ SharedFunctionInfo* shared_function_info_candidates_head_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
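
The class above threads its two candidate lists through the objects themselves: JSFunction candidates chain through next_function_link (with undefined marking "not on the list"), while SharedFunctionInfo candidates borrow the gc_metadata slot of their code object. The guard in each AddCandidate keeps an already-linked candidate from being enqueued twice. A compact stand-in showing the dedupe guard on one list (hypothetical types; a sentinel object plays the role of the heap's undefined value):

#include <cstdio>

struct Function {
  const char* name;
  Function* next_link;  // candidate link threaded through the object
};

// Stands in for undefined: a link equal to this means "not currently on
// the candidate list". The list itself is null-terminated, so the two
// states can never be confused.
Function undefined_sentinel = { "undefined", 0 };

Function* candidates_head = 0;

void AddCandidate(Function* f) {
  // Only enqueue functions that are not already linked; without this
  // guard a second add would cut the chain or create a cycle.
  if (f->next_link == &undefined_sentinel) {
    f->next_link = candidates_head;
    candidates_head = f;
  }
}

int main() {
  Function f1 = { "f1", &undefined_sentinel };
  Function f2 = { "f2", &undefined_sentinel };
  AddCandidate(&f1);
  AddCandidate(&f1);  // ignored: already on the list
  AddCandidate(&f2);
  for (Function* f = candidates_head; f != 0; f = f->next_link)
    std::printf("%s ", f->name);  // prints: f2 f1
  std::printf("\n");
  return 0;
}
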
@@ -505,7 +590,7 @@ class MarkCompactCollector {
PRECISE
};
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
@@ -553,6 +638,7 @@ class MarkCompactCollector {
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(Address slot, Code* target);
+ void RecordCodeTargetPatch(Address pc, Code* target);
INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
@@ -574,6 +660,8 @@ class MarkCompactCollector {
bool is_compacting() const { return compacting_; }
+ MarkingParity marking_parity() { return marking_parity_; }
+
private:
MarkCompactCollector();
~MarkCompactCollector();
@@ -606,14 +694,14 @@ class MarkCompactCollector {
bool abort_incremental_marking_;
+ MarkingParity marking_parity_;
+
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
bool was_marked_incrementally_;
- bool flush_monomorphic_ics_;
-
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
@@ -636,15 +724,9 @@ class MarkCompactCollector {
friend class RootMarkingVisitor;
friend class MarkingVisitor;
- friend class StaticMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- friend class Marker<IncrementalMarking>;
- friend class Marker<MarkCompactCollector>;
-
- // Mark non-optimized code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- void MarkInlinedFunctionsCode(Code* code);
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -658,25 +740,13 @@ class MarkCompactCollector {
void AfterMarking();
// Marks the object black and pushes it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for non-incremental marking only.
- INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
- // Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- // This is for non-incremental marking only.
- INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
// Marks the object black assuming that it is not yet marked.
// This is for non-incremental marking only.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
- void ProcessNewlyMarkedObject(HeapObject* obj);
-
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
@@ -779,7 +849,6 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
- Marker<MarkCompactCollector> marker_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
diff --git a/src/3rdparty/v8/src/messages.cc b/src/3rdparty/v8/src/messages.cc
index a0793c2..ce965fc 100644
--- a/src/3rdparty/v8/src/messages.cc
+++ b/src/3rdparty/v8/src/messages.cc
@@ -106,11 +106,20 @@ void MessageHandler::ReportMessage(Isolate* isolate,
// We are calling into the embedder's code, which can throw exceptions.
// Thus we need to save the current exception state, reset it to a clean
// one, and ignore scheduled exceptions that callbacks can throw.
+
+ // We pass the exception object into the message handler callback though.
+ Object* exception_object = isolate->heap()->undefined_value();
+ if (isolate->has_pending_exception()) {
+ isolate->pending_exception()->ToObject(&exception_object);
+ }
+ Handle<Object> exception_handle(exception_object);
+
Isolate::ExceptionScope exception_scope(isolate);
isolate->clear_pending_exception();
isolate->set_external_caught_exception(false);
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
v8::NeanderArray global_listeners(FACTORY->message_listeners());
int global_length = global_listeners.length();
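
The hunk above implements the save-and-restore dance the comment describes: stash the pending exception (now also exporting it for the callback), run the listeners with a clean slate, and let Isolate::ExceptionScope restore the saved state when it unwinds. A generic RAII sketch of that discipline (made-up ExceptionState, not the V8 Isolate API):

#include <cstdio>
#include <string>

struct ExceptionState {
  bool has_pending;
  std::string pending;
};

// Saves the exception state on entry and restores it when the scope ends,
// so the callback in between runs with a clean slate.
class ExceptionScope {
 public:
  explicit ExceptionScope(ExceptionState* state)
      : state_(state), saved_(*state) {
    state_->has_pending = false;
    state_->pending.clear();
  }
  ~ExceptionScope() { *state_ = saved_; }
 private:
  ExceptionState* state_;
  ExceptionState saved_;
};

int main() {
  ExceptionState state;
  state.has_pending = true;
  state.pending = "original error";
  {
    ExceptionScope scope(&state);
    // The embedder callback would run here; anything it raises is handled
    // locally and must not clobber the saved state.
    std::printf("inside scope, pending=%d\n", state.has_pending ? 1 : 0);
  }
  std::printf("after scope, pending=%d (%s)\n",
              state.has_pending ? 1 : 0, state.pending.c_str());
  return 0;
}
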
@@ -123,15 +132,13 @@ void MessageHandler::ReportMessage(Isolate* isolate,
for (int i = 0; i < global_length; i++) {
HandleScope scope;
if (global_listeners.get(i)->IsUndefined()) continue;
- v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
+ Handle<Foreign> callback_obj(Foreign::cast(global_listeners.get(i)));
v8::MessageCallback callback =
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
- Handle<Object> callback_data(listener.get(1));
{
// Do not allow exceptions to propagate.
v8::TryCatch try_catch;
- callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+ callback(api_message_obj, api_exception_obj);
}
if (isolate->has_scheduled_exception()) {
isolate->clear_scheduled_exception();
@@ -148,7 +155,9 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
JSFunction::cast(
Isolate::Current()->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
- Handle<Object> argv[] = { data };
+ Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
+ Handle<Object> argv[] = { Handle<Object>(message->type()),
+ Handle<Object>(message->arguments()) };
bool caught_exception;
Handle<Object> result =
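
GetMessage now hands the JavaScript formatter the message type plus its arguments, and, as the messages.js hunk below shows, each kMessages entry is an array of literal parts with "%0"/"%1" placeholders for those arguments. A sketch of that substitution scheme, kept in C++ for consistency with the examples above (hypothetical table and names):

#include <cctype>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Each template is a list of literal parts interleaved with "%N"
// placeholders, mirroring the shape of the kMessages arrays.
std::map<std::string, std::vector<std::string> > kTemplates;

std::string FormatMessage(const std::string& type,
                          const std::vector<std::string>& args) {
  std::string result;
  const std::vector<std::string>& parts = kTemplates[type];
  for (size_t i = 0; i < parts.size(); i++) {
    const std::string& part = parts[i];
    if (part.size() == 2 && part[0] == '%' &&
        std::isdigit(static_cast<unsigned char>(part[1]))) {
      size_t index = part[1] - '0';
      if (index < args.size()) result += args[index];  // substitute argument
    } else {
      result += part;  // literal fragment
    }
  }
  return result;
}

int main() {
  kTemplates["not_defined"].push_back("%0");
  kTemplates["not_defined"].push_back(" is not defined");
  kTemplates["unknown_label"].push_back("Undefined label '");
  kTemplates["unknown_label"].push_back("%0");
  kTemplates["unknown_label"].push_back("'");

  std::vector<std::string> args1(1, "foo");
  std::vector<std::string> args2(1, "retry");
  std::printf("%s\n", FormatMessage("not_defined", args1).c_str());
  std::printf("%s\n", FormatMessage("unknown_label", args2).c_str());
  return 0;
}
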
diff --git a/src/3rdparty/v8/src/messages.js b/src/3rdparty/v8/src/messages.js
index ab71936..f04bed9 100644
--- a/src/3rdparty/v8/src/messages.js
+++ b/src/3rdparty/v8/src/messages.js
@@ -26,18 +26,137 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// -------------------------------------------------------------------
-//
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
-// This will be lazily initialized when first needed (and forcibly
-// overwritten even though it's const).
-var kMessages = 0;
-function FormatString(format, message) {
- var args = %MessageGetArguments(message);
+var kMessages = {
+ // Error
+ cyclic_proto: ["Cyclic __proto__ value"],
+ code_gen_from_strings: ["%0"],
+ // TypeError
+ unexpected_token: ["Unexpected token ", "%0"],
+ unexpected_token_number: ["Unexpected number"],
+ unexpected_token_string: ["Unexpected string"],
+ unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_reserved: ["Unexpected reserved word"],
+ unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
+ unexpected_eos: ["Unexpected end of input"],
+ malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
+ unterminated_regexp: ["Invalid regular expression: missing /"],
+ regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
+ incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
+ multiple_defaults_in_switch: ["More than one default clause in switch statement"],
+ newline_after_throw: ["Illegal newline after throw"],
+ redeclaration: ["%0", " '", "%1", "' has already been declared"],
+ no_catch_or_finally: ["Missing catch or finally after try"],
+ unknown_label: ["Undefined label '", "%0", "'"],
+ uncaught_exception: ["Uncaught ", "%0"],
+ stack_trace: ["Stack Trace:\n", "%0"],
+ called_non_callable: ["%0", " is not a function"],
+ undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
+ property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
+ cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
+ not_constructor: ["%0", " is not a constructor"],
+ not_defined: ["%0", " is not defined"],
+ non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
+ non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
+ non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
+ with_expression: ["%0", " has no properties"],
+ illegal_invocation: ["Illegal invocation"],
+ no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
+ apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
+ apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
+ invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
+ instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
+ instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
+ null_to_object: ["Cannot convert null to object"],
+ reduce_no_initial: ["Reduce of empty array with no initial value"],
+ getter_must_be_callable: ["Getter must be a function: ", "%0"],
+ setter_must_be_callable: ["Setter must be a function: ", "%0"],
+ value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
+ proto_object_or_null: ["Object prototype may only be an Object or null"],
+ property_desc_object: ["Property description must be an object: ", "%0"],
+ redefine_disallowed: ["Cannot redefine property: ", "%0"],
+ define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
+ non_extensible_proto: ["%0", " is not extensible"],
+ handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
+ proto_non_object: ["Proxy.", "%0", " called with non-object as prototype"],
+ trap_function_expected: ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
+ handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
+ handler_returned_false: ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+ handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+ proxy_prop_not_configurable: ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+ proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"],
+ proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
+ invalid_weakmap_key: ["Invalid value used as weak map key"],
+ not_date_object: ["this is not a Date object."],
+ observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
+ observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
+ observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
+ observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
+ observe_notify_non_notifier: ["notify called on non-notifier object"],
+ // RangeError
+ invalid_array_length: ["Invalid array length"],
+ stack_overflow: ["Maximum call stack size exceeded"],
+ invalid_time_value: ["Invalid time value"],
+ // SyntaxError
+ unable_to_parse: ["Parse error"],
+ invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
+ invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
+ illegal_break: ["Illegal break statement"],
+ illegal_continue: ["Illegal continue statement"],
+ illegal_return: ["Illegal return statement"],
+ illegal_let: ["Illegal let declaration outside extended mode"],
+ error_loading_debugger: ["Error loading debugger"],
+ no_input_to_regexp: ["No input to ", "%0"],
+ invalid_json: ["String '", "%0", "' is not valid JSON"],
+ circular_structure: ["Converting circular structure to JSON"],
+ called_on_non_object: ["%0", " called on non-object"],
+ called_on_null_or_undefined: ["%0", " called on null or undefined"],
+ array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
+ object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
+ illegal_access: ["Illegal access"],
+ invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
+ strict_mode_with: ["Strict mode code may not include a with statement"],
+ strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
+ too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
+ too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
+ too_many_variables: ["Too many variables declared (only 131071 allowed)"],
+ strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
+ strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
+ strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
+ strict_function_name: ["Function name may not be eval or arguments in strict mode"],
+ strict_octal_literal: ["Octal literals are not allowed in strict mode."],
+ strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
+ accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
+ accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
+ strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
+ strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+ strict_reserved_word: ["Use of future reserved word in strict mode"],
+ strict_delete: ["Delete of an unqualified identifier in strict mode."],
+ strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
+ strict_const: ["Use of const in strict mode."],
+ strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
+ strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+ strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
+ strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
+ strict_caller: ["Illegal access to a strict mode caller function."],
+ unprotected_let: ["Illegal let declaration in unprotected statement context."],
+ unprotected_const: ["Illegal const declaration in unprotected statement context."],
+ cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
+ redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
+ harmony_const_assign: ["Assignment to constant variable."],
+ invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
+ module_type_error: ["Module '", "%0", "' used improperly"],
+ module_export_undefined: ["Export '", "%0", "' is not defined in module"],
+};
+
+
+function FormatString(format, args) {
var result = "";
var arg_num = 0;
for (var i = 0; i < format.length; i++) {
@@ -48,9 +167,14 @@ function FormatString(format, message) {
if (arg_num < 4) {
// str is one of %0, %1, %2 or %3.
try {
- str = ToDetailString(args[arg_num]);
+ str = NoSideEffectToString(args[arg_num]);
} catch (e) {
- str = "#<error>";
+ if (%IsJSModule(args[arg_num]))
+ str = "module";
+ else if (IS_SPEC_OBJECT(args[arg_num]))
+ str = "object";
+ else
+ str = "#<error>";
}
}
}
@@ -60,6 +184,26 @@ function FormatString(format, message) {
}
+function NoSideEffectToString(obj) {
+ if (IS_STRING(obj)) return obj;
+ if (IS_NUMBER(obj)) return %_NumberToString(obj);
+ if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
+ if (IS_UNDEFINED(obj)) return 'undefined';
+ if (IS_NULL(obj)) return 'null';
+ if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
+ var constructor = obj.constructor;
+ if (typeof constructor == "function") {
+ var constructorName = constructor.name;
+ if (IS_STRING(constructorName) && constructorName !== "") {
+ return "#<" + constructorName + ">";
+ }
+ }
+ }
+ if (IsNativeErrorObject(obj)) return %_CallFunction(obj, ErrorToString);
+ return %_CallFunction(obj, ObjectToString);
+}
+
+
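[Editor's note: a minimal standalone sketch of what the new NoSideEffectToString does, with the %-prefixed runtime calls and IS_* macros replaced by plain JavaScript approximations. Unlike %GetDataProperty, the plain property reads below can themselves trigger getters, so this is illustrative only, not the builtin's actual semantics.]

    function noSideEffectToStringSketch(obj) {
      if (typeof obj === "string") return obj;
      if (typeof obj === "number") return String(obj);
      if (typeof obj === "boolean") return obj ? "true" : "false";
      if (obj === undefined) return "undefined";
      if (obj === null) return "null";
      // For objects that still carry the default toString, report
      // "#<ConstructorName>" instead of invoking a possibly overridden,
      // side-effecting toString.
      if (typeof obj === "object" &&
          obj.toString === Object.prototype.toString) {
        var ctor = obj.constructor;
        if (typeof ctor === "function" && ctor.name) {
          return "#<" + ctor.name + ">";
        }
      }
      if (obj instanceof Error) return Error.prototype.toString.call(obj);
      return Object.prototype.toString.call(obj);
    }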
// To check if something is a native error we need to check the
// concrete native error types. It is not sufficient to use instanceof
// since it is possible to create an object that has Error.prototype on
@@ -110,7 +254,7 @@ function MakeGenericError(constructor, type, args) {
if (IS_UNDEFINED(args)) {
args = [];
}
- var e = new constructor(kAddMessageAccessorsMarker);
+ var e = new constructor(FormatMessage(type, args));
e.type = type;
e.arguments = args;
return e;
@@ -130,154 +274,10 @@ function MakeGenericError(constructor, type, args) {
// Helper functions; called from the runtime system.
-function FormatMessage(message) {
- if (kMessages === 0) {
- var messagesDictionary = [
- // Error
- "cyclic_proto", ["Cyclic __proto__ value"],
- "code_gen_from_strings", ["Code generation from strings disallowed for this context"],
- // TypeError
- "unexpected_token", ["Unexpected token ", "%0"],
- "unexpected_token_number", ["Unexpected number"],
- "unexpected_token_string", ["Unexpected string"],
- "unexpected_token_identifier", ["Unexpected identifier"],
- "unexpected_reserved", ["Unexpected reserved word"],
- "unexpected_strict_reserved", ["Unexpected strict mode reserved word"],
- "unexpected_eos", ["Unexpected end of input"],
- "malformed_regexp", ["Invalid regular expression: /", "%0", "/: ", "%1"],
- "unterminated_regexp", ["Invalid regular expression: missing /"],
- "regexp_flags", ["Cannot supply flags when constructing one RegExp from another"],
- "incompatible_method_receiver", ["Method ", "%0", " called on incompatible receiver ", "%1"],
- "invalid_lhs_in_assignment", ["Invalid left-hand side in assignment"],
- "invalid_lhs_in_for_in", ["Invalid left-hand side in for-in"],
- "invalid_lhs_in_postfix_op", ["Invalid left-hand side expression in postfix operation"],
- "invalid_lhs_in_prefix_op", ["Invalid left-hand side expression in prefix operation"],
- "multiple_defaults_in_switch", ["More than one default clause in switch statement"],
- "newline_after_throw", ["Illegal newline after throw"],
- "redeclaration", ["%0", " '", "%1", "' has already been declared"],
- "no_catch_or_finally", ["Missing catch or finally after try"],
- "unknown_label", ["Undefined label '", "%0", "'"],
- "uncaught_exception", ["Uncaught ", "%0"],
- "stack_trace", ["Stack Trace:\n", "%0"],
- "called_non_callable", ["%0", " is not a function"],
- "undefined_method", ["Object ", "%1", " has no method '", "%0", "'"],
- "property_not_function", ["Property '", "%0", "' of object ", "%1", " is not a function"],
- "cannot_convert_to_primitive", ["Cannot convert object to primitive value"],
- "not_constructor", ["%0", " is not a constructor"],
- "not_defined", ["%0", " is not defined"],
- "non_object_property_load", ["Cannot read property '", "%0", "' of ", "%1"],
- "non_object_property_store", ["Cannot set property '", "%0", "' of ", "%1"],
- "non_object_property_call", ["Cannot call method '", "%0", "' of ", "%1"],
- "with_expression", ["%0", " has no properties"],
- "illegal_invocation", ["Illegal invocation"],
- "no_setter_in_callback", ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- "apply_non_function", ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
- "apply_wrong_args", ["Function.prototype.apply: Arguments list has wrong type"],
- "invalid_in_operator_use", ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
- "instanceof_function_expected", ["Expecting a function in instanceof check, but got ", "%0"],
- "instanceof_nonobject_proto", ["Function has non-object prototype '", "%0", "' in instanceof check"],
- "null_to_object", ["Cannot convert null to object"],
- "reduce_no_initial", ["Reduce of empty array with no initial value"],
- "getter_must_be_callable", ["Getter must be a function: ", "%0"],
- "setter_must_be_callable", ["Setter must be a function: ", "%0"],
- "value_and_accessor", ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- "proto_object_or_null", ["Object prototype may only be an Object or null"],
- "property_desc_object", ["Property description must be an object: ", "%0"],
- "redefine_disallowed", ["Cannot redefine property: ", "%0"],
- "define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."],
- "non_extensible_proto", ["%0", " is not extensible"],
- "handler_non_object", ["Proxy.", "%0", " called with non-object as handler"],
- "proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"],
- "trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
- "handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
- "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
- "handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
- "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
- "proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
- "proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
- "proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
- "invalid_weakmap_key", ["Invalid value used as weak map key"],
- // RangeError
- "invalid_array_length", ["Invalid array length"],
- "stack_overflow", ["Maximum call stack size exceeded"],
- "invalid_time_value", ["Invalid time value"],
- // SyntaxError
- "unable_to_parse", ["Parse error"],
- "invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
- "invalid_regexp", ["Invalid RegExp pattern /", "%0", "/"],
- "illegal_break", ["Illegal break statement"],
- "illegal_continue", ["Illegal continue statement"],
- "illegal_return", ["Illegal return statement"],
- "illegal_let", ["Illegal let declaration outside extended mode"],
- "error_loading_debugger", ["Error loading debugger"],
- "no_input_to_regexp", ["No input to ", "%0"],
- "invalid_json", ["String '", "%0", "' is not valid JSON"],
- "circular_structure", ["Converting circular structure to JSON"],
- "called_on_non_object", ["%0", " called on non-object"],
- "called_on_null_or_undefined", ["%0", " called on null or undefined"],
- "array_indexof_not_defined", ["Array.getIndexOf: Argument undefined"],
- "object_not_extensible", ["Can't add property ", "%0", ", object is not extensible"],
- "illegal_access", ["Illegal access"],
- "invalid_preparser_data", ["Invalid preparser data for function ", "%0"],
- "strict_mode_with", ["Strict mode code may not include a with statement"],
- "strict_catch_variable", ["Catch variable may not be eval or arguments in strict mode"],
- "too_many_arguments", ["Too many arguments in function call (only 32766 allowed)"],
- "too_many_parameters", ["Too many parameters in function definition (only 32766 allowed)"],
- "too_many_variables", ["Too many variables declared (only 32767 allowed)"],
- "strict_param_name", ["Parameter name eval or arguments is not allowed in strict mode"],
- "strict_param_dupe", ["Strict mode function may not have duplicate parameter names"],
- "strict_var_name", ["Variable name may not be eval or arguments in strict mode"],
- "strict_function_name", ["Function name may not be eval or arguments in strict mode"],
- "strict_octal_literal", ["Octal literals are not allowed in strict mode."],
- "strict_duplicate_property", ["Duplicate data property in object literal not allowed in strict mode"],
- "accessor_data_property", ["Object literal may not have data and accessor property with the same name"],
- "accessor_get_set", ["Object literal may not have multiple get/set accessors with the same name"],
- "strict_lhs_assignment", ["Assignment to eval or arguments is not allowed in strict mode"],
- "strict_lhs_postfix", ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- "strict_lhs_prefix", ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- "strict_reserved_word", ["Use of future reserved word in strict mode"],
- "strict_delete", ["Delete of an unqualified identifier in strict mode."],
- "strict_delete_property", ["Cannot delete property '", "%0", "' of ", "%1"],
- "strict_const", ["Use of const in strict mode."],
- "strict_function", ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- "strict_read_only_property", ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- "strict_cannot_assign", ["Cannot assign to read only '", "%0", "' in strict mode"],
- "strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
- "strict_caller", ["Illegal access to a strict mode caller function."],
- "unprotected_let", ["Illegal let declaration in unprotected statement context."],
- "unprotected_const", ["Illegal const declaration in unprotected statement context."],
- "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
- "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
- "harmony_const_assign", ["Assignment to constant variable."],
- "invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"],
- "module_type_error", ["Module '", "%0", "' used improperly"],
- ];
- var messages = { __proto__ : null };
- for (var i = 0; i < messagesDictionary.length; i += 2) {
- var key = messagesDictionary[i];
- var format = messagesDictionary[i + 1];
-
- for (var j = 0; j < format.length; j++) {
- %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
- DONT_DELETE | READ_ONLY | DONT_ENUM);
- }
- %IgnoreAttributesAndSetProperty(format, 'length', format.length,
- DONT_DELETE | READ_ONLY | DONT_ENUM);
- %PreventExtensions(format);
- %IgnoreAttributesAndSetProperty(messages,
- key,
- format,
- DONT_DELETE | DONT_ENUM | READ_ONLY);
- }
- %PreventExtensions(messages);
- %IgnoreAttributesAndSetProperty(builtins, "kMessages",
- messages,
- DONT_DELETE | DONT_ENUM | READ_ONLY);
- }
- var message_type = %MessageGetType(message);
- var format = kMessages[message_type];
- if (!format) return "<unknown message " + message_type + ">";
- return FormatString(format, message);
+function FormatMessage(type, args) {
+ var format = kMessages[type];
+ if (!format) return "<unknown message " + type + ">";
+ return FormatString(format, args);
}
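[Editor's note: after this change the formatting pipeline is a plain table lookup: FormatMessage(type, args) fetches a template array from kMessages, and FormatString splices in up to four arguments where it meets a two-character "%0".."%3" element. A hypothetical self-contained sketch, using String() as a stand-in for NoSideEffectToString:]

    var kMessagesSketch = {
      not_defined: ["%0", " is not defined"],
      unknown_label: ["Undefined label '", "%0", "'"]
    };

    function formatMessageSketch(type, args) {
      var format = kMessagesSketch[type];
      if (!format) return "<unknown message " + type + ">";
      var result = "";
      for (var i = 0; i < format.length; i++) {
        var str = format[i];
        // Template elements are either literal strings or "%N" placeholders.
        if (str.length === 2 && str.charAt(0) === "%") {
          var argNum = str.charCodeAt(1) - 0x30;
          if (argNum >= 0 && argNum < 4) str = String(args[argNum]);
        }
        result += str;
      }
      return result;
    }

    formatMessageSketch("not_defined", ["foo"]);  // "foo is not defined"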
@@ -525,8 +525,8 @@ function ScriptLineCount() {
/**
- * Returns the name of script if available, contents of sourceURL comment
- * otherwise. See
+ * If a sourceURL comment is available and the script starts at position
+ * zero, returns the sourceURL comment contents. Otherwise, returns the
+ * script name. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
* for details on using //@ sourceURL comment to identify scripts that don't
* have a name.
@@ -535,14 +535,15 @@ function ScriptLineCount() {
* otherwise.
*/
function ScriptNameOrSourceURL() {
- if (this.name) {
+ if (this.line_offset > 0 || this.column_offset > 0) {
return this.name;
}
// The result is cached as on long scripts it takes noticeable time to search
// for the sourceURL.
- if (this.hasCachedNameOrSourceURL)
- return this.cachedNameOrSourceURL;
+ if (this.hasCachedNameOrSourceURL) {
+ return this.cachedNameOrSourceURL;
+ }
this.hasCachedNameOrSourceURL = true;
// TODO(608): the spaces in a regexp below had to be escaped as \040
@@ -760,18 +761,18 @@ function DefineOneShotAccessor(obj, name, fun) {
// Note that the accessors consistently operate on 'obj', not 'this'.
// Since the object may occur in someone else's prototype chain we
// can't rely on 'this' being the same as 'obj'.
- var hasBeenSet = false;
var value;
+ var value_factory = fun;
var getter = function() {
- if (hasBeenSet) {
+ if (value_factory == null) {
return value;
}
- hasBeenSet = true;
- value = fun(obj);
+ value = value_factory(obj);
+ value_factory = null;
return value;
};
var setter = function(v) {
- hasBeenSet = true;
+ value_factory = null;
value = v;
};
%DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
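[Editor's note: the rewritten DefineOneShotAccessor keeps the factory closure only until the first read or write, so it can be garbage-collected afterwards. A hypothetical equivalent using standard Object.defineProperty in place of the %DefineOrRedefineAccessorProperty runtime call:]

    function defineOneShotAccessor(obj, name, factory) {
      var value;
      var valueFactory = factory;
      Object.defineProperty(obj, name, {
        get: function() {
          if (valueFactory == null) return value;
          value = valueFactory(obj);
          valueFactory = null;   // Drop the factory so it can be collected.
          return value;
        },
        set: function(v) {
          valueFactory = null;   // An explicit store wins over the factory.
          value = v;
        },
        enumerable: false
      });
    }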
@@ -788,15 +789,7 @@ function CallSiteGetThis() {
}
function CallSiteGetTypeName() {
- var constructor = this.receiver.constructor;
- if (!constructor) {
- return %_CallFunction(this.receiver, ObjectToString);
- }
- var constructorName = constructor.name;
- if (!constructorName) {
- return %_CallFunction(this.receiver, ObjectToString);
- }
- return constructorName;
+ return GetTypeName(this, false);
}
function CallSiteIsToplevel() {
@@ -830,8 +823,10 @@ function CallSiteGetFunctionName() {
var name = this.fun.name;
if (name) {
return name;
- } else {
- return %FunctionGetInferredName(this.fun);
+ }
+ name = %FunctionGetInferredName(this.fun);
+ if (name) {
+ return name;
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
@@ -859,9 +854,9 @@ function CallSiteGetMethodName() {
}
var name = null;
for (var prop in this.receiver) {
- if (this.receiver.__lookupGetter__(prop) === this.fun ||
- this.receiver.__lookupSetter__(prop) === this.fun ||
- (!this.receiver.__lookupGetter__(prop) &&
+ if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
+ %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
+ (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) &&
this.receiver[prop] === this.fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) {
@@ -927,17 +922,25 @@ function CallSiteToString() {
var fileLocation = "";
if (this.isNative()) {
fileLocation = "native";
- } else if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- }
} else {
- fileName = this.getFileName();
- }
+ if (this.isEval()) {
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
+ }
+ } else {
+ fileName = this.getFileName();
+ }
- if (fileName) {
- fileLocation += fileName;
+ if (fileName) {
+ fileLocation += fileName;
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ fileLocation += "<anonymous>";
+ }
var lineNumber = this.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
@@ -948,24 +951,25 @@ function CallSiteToString() {
}
}
- if (!fileLocation) {
- fileLocation = "unknown source";
- }
var line = "";
- var functionName = this.getFunction().name;
- var addPrefix = true;
+ var functionName = this.getFunctionName();
+ var addSuffix = true;
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
+ var typeName = GetTypeName(this, true);
var methodName = this.getMethodName();
- line += this.getTypeName() + ".";
if (functionName) {
+ if (typeName && functionName.indexOf(typeName) != 0) {
+ line += typeName + ".";
+ }
line += functionName;
- if (methodName && (methodName != functionName)) {
+ if (methodName && functionName.lastIndexOf("." + methodName) !=
+ functionName.length - methodName.length - 1) {
line += " [as " + methodName + "]";
}
} else {
- line += methodName || "<anonymous>";
+ line += typeName + "." + (methodName || "<anonymous>");
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
@@ -973,9 +977,9 @@ function CallSiteToString() {
line += functionName;
} else {
line += fileLocation;
- addPrefix = false;
+ addSuffix = false;
}
- if (addPrefix) {
+ if (addSuffix) {
line += " (" + fileLocation + ")";
}
return line;
@@ -1085,6 +1089,19 @@ function FormatRawStackTrace(error, raw_stack) {
}
}
+function GetTypeName(obj, requireConstructor) {
+ var constructor = obj.receiver.constructor;
+ if (!constructor) {
+ return requireConstructor ? null :
+ %_CallFunction(obj.receiver, ObjectToString);
+ }
+ var constructorName = constructor.name;
+ if (!constructorName) {
+ return requireConstructor ? null :
+ %_CallFunction(obj.receiver, ObjectToString);
+ }
+ return constructorName;
+}
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
@@ -1138,13 +1155,7 @@ function SetUpError() {
%IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
%IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
%IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
- if (m === kAddMessageAccessorsMarker) {
- // DefineOneShotAccessor always inserts a message property and
- // ignores setters.
- DefineOneShotAccessor(this, 'message', function (obj) {
- return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
- });
- } else if (!IS_UNDEFINED(m)) {
+ if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
}
@@ -1207,11 +1218,6 @@ function ErrorToStringDetectCycle(error) {
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
- var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
- if (type && !hasMessage) {
- var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
- message = FormatMessage(%NewMessageObject(type, args));
- }
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;
if (message === "") return name;
diff --git a/src/3rdparty/v8/src/mips/assembler-mips-inl.h b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
index 2ff4710..3e726a7 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips-inl.h
+++ b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
@@ -156,6 +156,11 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
}
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
@@ -208,10 +213,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.cc b/src/3rdparty/v8/src/mips/assembler-mips.cc
index f347fdc..4ce924d 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/assembler-mips.cc
@@ -273,8 +273,8 @@ static const int kMinimalBufferSize = 4 * KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ recorded_ast_id_(TypeFeedbackId::None()),
+ positions_recorder_(this) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -579,17 +579,20 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
ASSERT(type < 32);
uint32_t opcode = GetOpcodeField(instr);
+ uint32_t function = GetFunctionField(instr);
uint32_t rt = GetRt(instr);
- uint32_t rs = GetRs(instr);
+ uint32_t rd = GetRd(instr);
uint32_t sa = GetSa(instr);
- // nop(type) == sll(zero_reg, zero_reg, type);
- // Technically all these values will be 0 but
- // this makes more sense to the reader.
+ // Traditional mips nop == sll(zero_reg, zero_reg, 0)
+ // When marking a non-zero type, use sll(zero_reg, at, type)
+ // to avoid use of mips ssnop and ehb special encodings
+ // of the sll instruction.
- bool ret = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ Register nop_rt_reg = (type == 0) ? zero_reg : at;
+ bool ret = (opcode == SPECIAL && function == SLL &&
+ rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
sa == type);
return ret;
@@ -2046,7 +2049,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_,
+ rmode,
+ RecordedAstId().ToInt(),
+ NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.h b/src/3rdparty/v8/src/mips/assembler-mips.h
index 84714e5..fd2ff0d 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/assembler-mips.h
@@ -318,12 +318,15 @@ const FPURegister f31 = { 31 };
// Register aliases.
// cp is assumed to be a callee saved register.
-static const Register& kLithiumScratchReg = s3; // Scratch register.
-static const Register& kLithiumScratchReg2 = s4; // Scratch register.
-static const Register& kRootRegister = s6; // Roots array pointer.
-static const Register& cp = s7; // JavaScript context pointer.
-static const DoubleRegister& kLithiumScratchDouble = f30;
-static const FPURegister& kDoubleRegZero = f28;
+// Defined using #define instead of "static const Register&" because Clang
+// complains otherwise when a compilation unit that includes this header
+// doesn't use the variables.
+#define kRootRegister s6
+#define cp s7
+#define kLithiumScratchReg s3
+#define kLithiumScratchReg2 s4
+#define kLithiumScratchDouble f30
+#define kDoubleRegZero f28
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -522,9 +525,6 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -568,6 +568,10 @@ class Assembler : public AssemblerBase {
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
static void JumpLabelToJumpRegister(Address pc);
static void QuietNaN(HeapObject* nan);
@@ -628,6 +632,8 @@ class Assembler : public AssemblerBase {
// register.
static const int kPcLoadDelta = 4;
+ static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
+
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceInstructions = 7;
@@ -660,10 +666,13 @@ class Assembler : public AssemblerBase {
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
- // Type == 0 is the default non-marking type.
+ // Type == 0 is the default non-marking nop. For mips this is a
+ // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
+ // marking, to avoid conflict with ssnop and ehb instructions.
void nop(unsigned int type = 0) {
ASSERT(type < 32);
- sll(zero_reg, zero_reg, type, true);
+ Register nop_rt_reg = (type == 0) ? zero_reg : at;
+ sll(zero_reg, nop_rt_reg, type, true);
}
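[Editor's note: the marking-nop check can be modeled against the standard MIPS32 R-type field layout (opcode 31:26, rt 20:16, rd 15:11, sa 10:6, funct 5:0; SPECIAL and SLL both encode as 0, zero_reg is $0, at is $1). A sketch; the actual helper names in assembler-mips.h differ:]

    function isMarkingNop(instr, type) {
      var opcode = (instr >>> 26) & 0x3f;    // SPECIAL == 0
      var rt     = (instr >>> 16) & 0x1f;
      var rd     = (instr >>> 11) & 0x1f;
      var sa     = (instr >>> 6)  & 0x1f;
      var funct  = instr & 0x3f;             // SLL == 0
      var ZERO = 0, AT = 1;                  // register numbers
      var nopRt = (type === 0) ? ZERO : AT;  // at-based marking avoids ssnop/ehb
      return opcode === 0 && funct === 0 &&
             rd === ZERO && rt === nopRt && sa === type;
    }

    // isMarkingNop(0x00010140, 5) === true   (sll zero_reg, at, 5)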
@@ -909,17 +918,17 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void SetRecordedAstId(unsigned ast_id) {
- ASSERT(recorded_ast_id_ == kNoASTId);
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
- unsigned RecordedAstId() {
- ASSERT(recorded_ast_id_ != kNoASTId);
+ TypeFeedbackId RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
- void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1016,9 +1025,7 @@ class Assembler : public AssemblerBase {
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned recorded_ast_id_;
-
- bool emit_debug_code() const { return emit_debug_code_; }
+ TypeFeedbackId recorded_ast_id_;
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1270,7 +1277,6 @@ class Assembler : public AssemblerBase {
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
- bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
index eeb84c3..0342e65 100644
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ b/src/3rdparty/v8/src/mips/builtins-mips.cc
@@ -79,12 +79,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the global context.
+ // Load the native context.
- __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the InternalArray function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
__ lw(result,
MemOperand(result,
Context::SlotOffset(
@@ -94,12 +95,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
+ // Load the native context.
- __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
__ lw(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -118,7 +120,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -214,7 +216,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+ __ LoadInitialArrayMap(array_function, scratch2,
+ elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
@@ -449,10 +452,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Branch(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// a3: JSArray
__ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a2,
t5,
@@ -712,6 +715,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ __ push(a1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -1392,9 +1432,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
__ lw(a2, FieldMemOperand(a2, kGlobalIndex));
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
@@ -1585,9 +1625,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ lw(a0, FieldMemOperand(a0, kGlobalOffset));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.cc b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
index a5b4edd..b1fe4d5 100644
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.cc
+++ b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
@@ -87,6 +87,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
// Pop the function info from the stack.
@@ -100,32 +102,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
- __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+ __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
+ __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
__ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
__ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
__ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ lw(a1,
+ FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ And(at, a1, a1);
+ __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+ }
+ __ bind(&install_unoptimized);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -133,6 +147,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
__ Ret();
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
+
+ // a2 holds native context, a1 points to fixed array of 3-element entries
+ // (native context, optimized code, literals).
+ // The optimized code map must never be empty, so check the first elements.
+ Label install_optimized;
+ // Speculatively move code object into t0.
+ __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
+ __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+ __ Branch(&install_optimized, eq, a2, Operand(t1));
+
+ // Iterate through the rest of map backwards. t0 holds an index as a Smi.
+ Label loop;
+ __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ // Do not double check first entry.
+
+ __ Branch(&install_unoptimized, eq, t0,
+ Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ Subu(t0, t0, Operand(
+ Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, t1, Operand(at));
+ __ lw(t1, MemOperand(t1));
+ __ Branch(&loop, ne, a2, Operand(t1));
+ // Hit: fetch the optimized code.
+ __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, t1, Operand(at));
+ __ Addu(t1, t1, Operand(kPointerSize));
+ __ lw(t0, MemOperand(t1));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+ 1, t2, t3);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+ // Now link a function into a list of optimized functions.
+ __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+ // No need for a write barrier: the JSFunction (v0) is in new space.
+
+ __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Copy the JSFunction (v0) into t0 before issuing the write barrier, as
+ // the write barrier clobbers all the registers passed to it.
+ __ mov(t0, v0);
+ __ RecordWriteContextSlot(
+ a2,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ t0,
+ a1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
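[Editor's note: the scan this stub performs over the optimized code map can be modeled in a few lines. Per the comments above, the map is a flat array of [nativeContext, optimizedCode, literals] triples; the stub checks the first entry, then walks the remaining entries backwards. A sketch ignoring Smi tagging, with kEntryLength assumed to be 3:]

    var ENTRY_LENGTH = 3;  // assumed value of SharedFunctionInfo::kEntryLength

    function findOptimizedCode(map, nativeContext) {
      // Fast path: the map is never empty, so probe the first entry directly.
      if (map[0] === nativeContext) return map[1];
      // Walk the rest backwards; the first entry was already checked.
      for (var i = map.length - ENTRY_LENGTH; i >= ENTRY_LENGTH;
           i -= ENTRY_LENGTH) {
        if (map[i] === nativeContext) return map[i + 1];  // hit: fetch code
      }
      return null;  // miss: install the unoptimized shared code instead
    }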
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(t0, Heap::kFalseValueRootIndex);
@@ -164,16 +244,16 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Copy the qml global object from the surrounding context.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
@@ -216,9 +296,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
@@ -227,20 +307,20 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
+ __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
// Copy the qml global object from the surrounding context.
- __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
- __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_INDEX));
+ __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX));
+ __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_OBJECT_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -584,11 +664,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -650,11 +728,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Register scratch3,
FPURegister double_scratch,
Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -790,11 +866,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ Branch(&done);
__ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
@@ -861,11 +935,9 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ UntagAndJumpIfSmi(dst, object, &done);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
@@ -2588,9 +2660,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
Register scratch3 = t0;
ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands && FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
}
Register heap_number_map = t2;
@@ -3179,7 +3251,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
} else {
// Tail call that writes the int32 in a2 to the heap number in v0, using
// a3 and a0 as scratch. v0 is preserved and returned.
- __ mov(a0, t1);
+ __ mov(v0, t1);
WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
__ TailCallStub(&stub);
}
@@ -3502,23 +3574,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1,
1);
} else {
- if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
- // Register a0 holds precalculated cache entry address; preserve
- // it on the stack and pop it into register cache_entry after the
- // call.
- __ Push(cache_entry, a2, a3);
+ // a0: precalculated cache entry address.
+ // a2 and a3: parts of the double value.
+ // Store a0, a2 and a3 on stack for later before calling C function.
+ __ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ Pop(cache_entry, a2, a3);
+ __ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -4615,14 +4687,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// v0 = address of new object(s) (tagged)
// a2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into t0.
+ // Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -4810,9 +4882,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
- // Get the arguments boilerplate from the current (global) context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@@ -4946,7 +5018,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a2, a2, Operand(2)); // a2 was a smi.
// Check that the static offsets vector buffer is large enough.
- __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ Branch(
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
// a2: Number of capture registers
// regexp_data: RegExp data (FixedArray)
@@ -5059,7 +5132,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// a0: Instance type of subject string
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
@@ -5092,7 +5165,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 8;
+ const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
@@ -5103,27 +5176,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// allocating space for the c argument slots, we don't need to calculate
// that into the argument positions on the stack. This is how the stack will
// look (sp meaning the value of sp at this moment):
+ // [sp + 5] - Argument 9
// [sp + 4] - Argument 8
// [sp + 3] - Argument 7
// [sp + 2] - Argument 6
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
- // Argument 8: Pass current isolate address.
+ // Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
__ li(a0, Operand(ExternalReference::isolate_address()));
- __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+ __ sw(a0, MemOperand(sp, 5 * kPointerSize));
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Indicate that this is a direct call from JavaScript.
__ li(a0, Operand(1));
- __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+ __ sw(a0, MemOperand(sp, 4 * kPointerSize));
- // Argument 6: Start (high end) of backtracking stack memory area.
+ // Argument 7: Start (high end) of backtracking stack memory area.
__ li(a0, Operand(address_of_regexp_stack_memory_address));
__ lw(a0, MemOperand(a0, 0));
__ li(a2, Operand(address_of_regexp_stack_memory_size));
__ lw(a2, MemOperand(a2, 0));
__ addu(a0, a0, a2);
+ __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(a0, zero_reg);
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
// Argument 5: static offsets vector buffer.
@@ -5174,7 +5253,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ Branch(&success, eq, v0, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
Label failure;
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
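
Context for the comparison against 1 above: the generated regexp entry code
returns the number of matches performed, and a global regexp can report more
than one. Because Argument 6 forces the output-register count to zero, even a
global regexp stops after a single match, so the only success value is 1. A
minimal sketch of the result convention the stub assumes; the names mirror
NativeRegExpMacroAssembler::Result, but the numeric values are stated here as
an assumption rather than quoted from the header:

  // Sketch only; assumed to match NativeRegExpMacroAssembler::Result.
  enum Result {
    RETRY = -2,      // Transient failure; retry through the runtime system.
    EXCEPTION = -1,  // A pending exception was thrown (e.g. stack overflow).
    FAILURE = 0,     // The regexp did not match.
    SUCCESS = 1      // Exactly one match, given zero output registers.
  };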
@@ -5337,10 +5418,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ Addu(a3, v0, Operand(JSRegExpResult::kSize));
__ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
__ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
__ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -5365,12 +5446,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set FixedArray length.
__ sll(t2, t1, kSmiTagSize);
__ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// v0: JSArray, tagged.
- // a2: the hole.
+ // a2: undefined.
// a3: Start of elements in FixedArray.
// t1: Number of elements to fill.
Label loop;
@@ -5449,7 +5530,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
__ sw(a3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@@ -6217,7 +6299,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6260,7 +6342,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
@@ -6632,9 +6714,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
__ xor_(t0, t0, t1);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ Branch(
+ &ascii_data, eq, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -7244,8 +7327,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
// Compute the capacity mask.
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -7431,8 +7513,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+ // and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@@ -7441,6 +7523,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7489,6 +7573,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(FPU);
+}
+
+
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -7614,6 +7703,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
+ __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ lw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ sw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
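
The block added at the top of this hunk gives every page a write-barrier
budget: each recorded write decrements the counter stored at
MemoryChunk::kWriteBarrierCounterOffset, and once it goes negative the stub
skips the mark-bit checks and jumps straight to the incremental-marking slow
path. Roughly, in C++ (a sketch; the stub works on the raw page word, and the
accessor names below are assumptions):

  MemoryChunk* page = MemoryChunk::FromAddress(object_address);
  intptr_t counter = page->write_barrier_counter() - 1;  // assumed accessor
  page->set_write_barrier_counter(counter);              // assumed accessor
  if (counter < 0) {
    InformIncrementalMarker();  // hypothetical name for the slow path
    return;
  }
  // Otherwise fall through to the object/value color checks below.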
@@ -7698,9 +7797,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(a2, t1, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS.
__ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+ __ CheckFastSmiElements(a2, t1, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7712,7 +7811,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(t1, t0);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7725,8 +7824,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
__ bind(&smi_element);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7735,16 +7834,78 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
+ __ StoreNumberToDoubleElements(a0, a3, a1,
+ // Overwrites all regs after this.
+ t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ __ push(ra);
+ __ CallStub(&stub);
+ __ pop(ra);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push ra" instruction, followed by a call.
+ // Note: on MIPS "push" is 2 instructions.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+ // Save live volatile registers.
+ __ Push(ra, t1, a1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(t1, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+
+#if defined(V8_HOST_ARCH_MIPS)
+ __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ lw(at, MemOperand(at));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ li(at, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+#endif
+ __ Call(at);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, t1);
+ }
+
+ __ Pop(ra, t1, a1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
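
The ProfileEntryHookStub added above calls a process-wide entry_hook_ with the
address of the function being entered and the stack slot holding the caller's
return address. In the embedder API of this V8 vintage the hook is installed
through V8::SetFunctionEntryHook before any code is compiled; a minimal usage
sketch, with the exact signature taken as an assumption from v8.h:

  static void MyEntryHook(uintptr_t function,
                          uintptr_t return_addr_location) {
    // Runs on entry to every instrumented function; must be fast and
    // must not re-enter V8.
  }

  // During startup, before compiling any script:
  v8::V8::SetFunctionEntryHook(MyEntryHook);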
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.cc b/src/3rdparty/v8/src/mips/codegen-mips.cc
index 9acccdc..44e0359 100644
--- a/src/3rdparty/v8/src/mips/codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/codegen-mips.cc
@@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
diff --git a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
index 62f3155..9fd815b 100644
--- a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
+++ b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
@@ -48,6 +48,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -96,8 +100,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -186,11 +189,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -209,7 +212,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -229,9 +232,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -342,8 +345,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -567,19 +570,145 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
+ // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
+ // need one additional entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
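+ // For reference, the SetFrameSlot calls above lay out the materialized
+ // getter frame as follows (kPointerSize == 4 on MIPS32; a setter frame
+ // appends one more slot, the implicit return value, below the code object):
+ //
+ //   top + 16 : caller's pc
+ //   top + 12 : caller's fp        <-- this frame's fp
+ //   top +  8 : context
+ //   top +  4 : Smi(StackFrame::INTERNAL), the function marker
+ //   top +  0 : accessor stub code object
+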
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
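
The hunks above replace raw unsigned ast ids with the BailoutId value type
(the full-codegen hunks below use its named constructors as well). A
simplified sketch of the wrapper as these call sites use it; the concrete
constants are illustrative assumptions, not quotes from the header:

  class BailoutId {
   public:
    explicit BailoutId(int id) : id_(id) {}
    int ToInt() const { return id_; }
    bool operator==(const BailoutId& other) const { return id_ == other.id_; }
    static BailoutId None() { return BailoutId(-1); }          // assumed value
    static BailoutId FunctionEntry() { return BailoutId(2); }  // assumed value
    static BailoutId Declarations() { return BailoutId(3); }   // assumed value
   private:
    int id_;
  };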
diff --git a/src/3rdparty/v8/src/mips/full-codegen-mips.cc b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
index 842232a..46c9ecb 100644
--- a/src/3rdparty/v8/src/mips/full-codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
@@ -143,6 +143,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -191,11 +193,14 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
(scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in a1.
+ Comment cmnt(masm_, "[ Allocate context");
+ // Argument to NewContext is the function, which is still in a1.
__ push(a1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -271,7 +276,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -286,7 +291,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
@@ -333,7 +338,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
- reset_value = 10;
+ reset_value = FLAG_interrupt_budget >> 4;
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
@@ -341,10 +346,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@@ -361,7 +362,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
@@ -414,7 +415,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@@ -794,7 +795,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
@@ -818,11 +819,12 @@ void FullCodeGenerator::VisitVariableDeclaration(
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ : isolate()->factory()->undefined_value(),
+ zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
case Variable::PARAMETER:
@@ -849,10 +851,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
- PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
- ? READ_ONLY : NONE;
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -879,13 +880,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(function, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
}
@@ -939,9 +940,9 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
Visit(declaration->module());
break;
}
@@ -1144,25 +1145,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ mov(a2, v0);
- __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- __ Branch(&fixed_array, ne, a1, Operand(at));
+ __ Branch(&fixed_array, ne, a2, Operand(at));
// We got a map in register v0. Get the enumeration cache from it.
+ Label no_descriptors;
__ bind(&use_cache);
- __ LoadInstanceDescriptors(v0, a1);
- __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
- __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ __ EnumLength(a1, v0);
+ __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadInstanceDescriptors(v0, a2);
+ __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
+ __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(v0); // Map.
- __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(a2, a1, a0);
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ jmp(&exit);
+
// We got a fixed array in register v0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
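
After the fast path above, the for-in state pushed by `__ push(v0)` and
`__ Push(a2, a1, a0)` sits on the stack as follows (the enumerable itself was
pushed earlier and lies below these four slots):

  //   sp + 12 : map of the enumerable (checked each iteration for changes)
  //   sp +  8 : enum cache, a FixedArray of property names
  //   sp +  4 : enum cache length, as a smi (from EnumLength)
  //   sp +  0 : current index, Smi(0)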
@@ -1171,7 +1179,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
@@ -1327,9 +1335,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Move(next, current);
}
__ bind(&loop);
- // Terminate at global context.
+ // Terminate at native context.
__ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(t0));
// Check that extension is NULL.
__ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
@@ -1615,9 +1623,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1644,7 +1652,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1715,7 +1723,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ bool has_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1737,8 +1746,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(constant_elements_kind == FAST_ELEMENTS ||
- constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1767,7 +1775,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
- if (constant_elements_kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ lw(t2, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
@@ -1855,11 +1863,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1917,7 +1925,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1926,7 +1934,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1954,7 +1962,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2037,7 +2046,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2169,7 +2179,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2202,44 +2212,17 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ lw(t0, MemOperand(sp, kPointerSize)); // Receiver is now under value.
- __ push(t0);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(a0, result_register()); // Load the value.
__ li(a2, Operand(prop->key()->AsLiteral()->handle()));
- // Load receiver to a1. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ lw(a1, MemOperand(sp));
- } else {
- __ pop(a1);
- }
+ __ pop(a1);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(v0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ lw(t0, MemOperand(sp, kPointerSize));
- __ push(t0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(v0);
- __ Drop(1);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
}
@@ -2248,18 +2231,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ lw(t0, MemOperand(sp, 2 * kPointerSize));
- __ push(t0);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
// Call keyed store IC.
@@ -2269,29 +2240,13 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a2 is the receiver.
__ mov(a0, result_register());
__ pop(a1); // Key.
- // Load receiver to a2. Leave a copy in the stack if needed for turning the
- // receiver into fast case.
- if (expr->ends_initialization_block()) {
- __ lw(a2, MemOperand(sp));
- } else {
- __ pop(a2);
- }
+ __ pop(a2);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(v0); // Result of assignment, saved even if not needed.
- // Receiver is under the result value.
- __ lw(t0, MemOperand(sp, kPointerSize));
- __ push(t0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(v0);
- __ Drop(1);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
}
@@ -2304,6 +2259,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(v0);
} else {
VisitForStackValue(expr->obj());
@@ -2317,9 +2273,9 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId id) {
ic_total_count_++;
- __ Call(code, rmode, ast_id);
+ __ Call(code, rmode, id);
}
@@ -2340,7 +2296,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2373,7 +2329,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2393,16 +2349,14 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ li(a2, Operand(cell));
- }
+ // Record call targets.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ li(a2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2595,21 +2549,15 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ li(a2, Operand(cell));
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ li(a2, Operand(cell));
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
@@ -2750,7 +2698,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(v0);
+ __ AssertNotSmi(v0);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
@@ -2764,28 +2712,31 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Branch(if_false, eq, a2, Operand(t0));
// Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(a3, a1);
+ __ Branch(&done, eq, a3, Operand(zero_reg));
+
__ LoadInstanceDescriptors(a1, t0);
- __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
- // t0: descriptor array
- // a3: length of descriptor array
- // Calculate the end of the descriptor array.
+ // t0: descriptor array.
+ // a3: valid entries in the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kPointerSize == 4);
- __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(at, Operand(DescriptorArray::kDescriptorSize));
+ __ Mul(a3, a3, at);
+ // Calculate location of the first key name.
+ __ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ // Calculate the end of the descriptor array.
+ __ mov(a2, t0);
__ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a2, a2, t1);
- // Calculate location of the first key name.
- __ Addu(t0,
- t0,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag +
- DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
- Label entry, loop;
// The use of t2 to store the valueOf symbol assumes that it is not otherwise
// used in the loop below.
__ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
@@ -2793,17 +2744,18 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
__ Branch(if_false, eq, a3, Operand(t2));
- __ Addu(t0, t0, Operand(kPointerSize));
+ __ Addu(t0, t0, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ Branch(&loop, ne, t0, Operand(a2));
- // If a valueOf property is not found on the object check that it's
+ __ bind(&done);
+ // If a valueOf property is not found on the object check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
__ JumpIfSmi(a2, if_false);
__ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ Branch(if_false, ne, a2, Operand(a3));
@@ -3083,8 +3035,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(FPU)) {
__ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(FPU);
@@ -3101,8 +3053,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
} else {
__ PrepareCallCFunction(2, a0);
__ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
@@ -3166,21 +3118,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = v0;
Register result = v0;
Register scratch0 = t5;
Register scratch1 = a1;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ GetObjectType(object, scratch1, scratch1);
- __ Assert(eq, "Trying to get date field from non-date.",
- scratch1, Operand(JS_DATE_TYPE));
-#endif
+ __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3197,9 +3147,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ li(a1, Operand(index));
__ Move(a0, object);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
+ __ jmp(&done);
}
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(v0);
}
@@ -3474,10 +3427,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(v0, &runtime);
__ GetObjectType(v0, a1, a1);
- __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
@@ -3487,7 +3441,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(v0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3516,7 +3470,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3528,8 +3482,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = v0;
Register cache = a1;
- __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ lw(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ lw(cache,
ContextOperand(
cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
@@ -3625,9 +3579,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(v0);
- }
+ __ AssertString(v0);
__ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
__ IndexFromHash(v0, v0);
@@ -3701,7 +3653,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
array_length, Operand(zero_reg));
}
@@ -3904,7 +3856,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -4060,7 +4012,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(v0);
}
@@ -4118,7 +4071,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4171,7 +4124,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4204,7 +4157,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4222,7 +4175,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4426,7 +4379,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4507,7 +4460,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4537,14 +4490,57 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
STATIC_ASSERT(0 == kSmiTag);
__ Addu(a1, a1, Operand(a1)); // Convert to smi.
+
+ // Store result register while executing finally block.
+ __ push(a1);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ lw(a1, MemOperand(at));
+ __ push(a1);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ lw(a1, MemOperand(at));
+ __ SmiTag(a1);
+ __ push(a1);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ lw(a1, MemOperand(at));
__ push(a1);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(a1));
+ // Restore pending message from stack.
+ __ pop(a1);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ li(at, Operand(pending_message_script));
+ __ sw(a1, MemOperand(at));
+
+ __ pop(a1);
+ __ SmiUntag(a1);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ li(at, Operand(has_pending_message));
+ __ sw(a1, MemOperand(at));
+
+ __ pop(a1);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ li(at, Operand(pending_message_obj));
+ __ sw(a1, MemOperand(at));
+
// Restore result register from stack.
__ pop(a1);
+
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
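
The EnterFinallyBlock/ExitFinallyBlock changes above spill the isolate's
pending-message state so that code running inside a finally clause cannot
clobber the message of an exception in flight, then restore it in the reverse
order. While the finally body runs, the stack holds (top first):

  //   sp +  0 : pending_message_script
  //   sp +  4 : has_pending_message, as a smi
  //   sp +  8 : pending_message_obj
  //   sp + 12 : result register contents, as a smi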
diff --git a/src/3rdparty/v8/src/mips/ic-mips.cc b/src/3rdparty/v8/src/mips/ic-mips.cc
index 964a7e2..cf70681 100644
--- a/src/3rdparty/v8/src/mips/ic-mips.cc
+++ b/src/3rdparty/v8/src/mips/ic-mips.cc
@@ -398,7 +398,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1189,6 +1189,145 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch_value = t0;
+ Register address = t1;
+ if (check_map == kCheckMap) {
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(fast_double, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ // Update write barrier for the elements array address.
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+ __ Branch(slow, ne, elements_map, Operand(at));
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements, // Overwritten.
+ a3, // Scratch regs...
+ t0,
+ t1,
+ t2,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&non_double_value, ne, t0, Operand(at));
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
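Both the smi and the object store above compute the destination address the same way: strip the heap-object tag, skip the FixedArray header, then scale the smi key down to a word index. A hedged scalar restatement (header size and tag assumed from the 32-bit layout, not quoted from V8):

#include <cstdint>

// Byte address of element `smi_key` in a FixedArray, mirroring the
// Addu/sll/Addu triple above; 4-byte pointers, one smi tag bit.
uintptr_t ElementAddress(uintptr_t tagged_elements, uint32_t smi_key) {
  const int kHeapObjectTag = 1;   // tagged pointers point one past the object
  const int kHeaderSize = 8;      // map word + length word (assumed)
  const int kPointerSizeLog2 = 2;
  const int kSmiTagSize = 1;
  uintptr_t base = tagged_elements - kHeapObjectTag + kHeaderSize;
  return base + (smi_key << (kPointerSizeLog2 - kSmiTagSize));
}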
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@@ -1197,11 +1336,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Register usage.
Register value = a0;
@@ -1233,7 +1370,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
+ __ Branch(&fast_object, lo, key, Operand(t0));
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1258,19 +1395,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Branch(
&check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
- // Calculate key + 1 as smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&fast_object_without_map_check);
+ __ jmp(&fast_object_grow);
__ bind(&check_if_double_array);
__ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -1281,109 +1410,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
- // Fall through to fast case.
-
- __ bind(&fast_object_with_map_check);
- Register scratch_value = t0;
- Register address = t1;
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&fast_double_with_map_check,
- ne,
- elements_map,
- Heap::kFixedArrayMapRootIndex);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(v0, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- a3,
- t0,
- t1,
- t2,
- &transition_double_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
-
- // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
}
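The grow variants get away with a plain Addu on the tagged length because smi arithmetic is closed under addition: both operands carry the same << 1 encoding, so the sum is the correctly tagged result. A short sketch (illustrative):

#include <cstdint>

// kIncrementLength path in scalar form: Smi::FromInt(1) is 1 << 1, and
// adding two tagged smis yields the tagged sum directly.
int32_t SmiFromInt(int32_t v) { return v << 1; }
int32_t GrownLength(int32_t smi_key) { return smi_key + SmiFromInt(1); }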
@@ -1471,7 +1506,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
index d016743..b268fb3 100644
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
@@ -89,17 +89,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -125,6 +116,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -260,7 +253,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
// TODO(plind): not clear that this will have advantage for MIPS.
// Skipping it for now. Raised issue #100 for this.
- Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+ Abort("Unimplemented: GenerateDeoptJumpTable");
return false;
}
@@ -293,7 +286,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -331,7 +325,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -355,9 +350,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
@@ -367,33 +362,33 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Operand(static_cast<int32_t>(literal->Number()));
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
- return Operand(literal);
+ return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@@ -438,7 +433,9 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -446,8 +443,21 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ // Function parameters are arguments to the outermost environment. The
+ // arguments index points to the first element of a sequence of tagged
+ // values on the stack that represent the arguments. This needs to be
+ // kept in sync with the LArgumentsElements implementation.
+ *arguments_index = -environment->parameter_count();
+ *arguments_count = environment->parameter_count();
+
+ WriteTranslation(environment->outer(),
+ translation,
+ arguments_index,
+ arguments_count);
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -455,12 +465,31 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
+
+ // Inlined frames which push their arguments cause the index to be
+ // bumped and a new stack area to be used for materialization.
+ if (environment->entry() != NULL &&
+ environment->entry()->arguments_pushed()) {
+ *arguments_index = *arguments_index < 0
+ ? GetStackSlotCount()
+ : *arguments_index + *arguments_count;
+ *arguments_count = environment->entry()->arguments_count() + 1;
+ }
+
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -471,7 +500,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -479,26 +511,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
- false);
+ false,
+ false,
+ *arguments_index,
+ *arguments_count);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
}
}
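The arguments-area bookkeeping above has a simple contract: the outermost frame's parameters sit at negative offsets, and each inlined frame that actually pushed arguments claims the next positive slice after the spill slots. A hedged restatement with the quantities pulled out as parameters (the +1 is assumed to account for the receiver):

// Sketch of the index bump performed per environment in WriteTranslation.
void BumpArgumentsArea(bool arguments_pushed, int stack_slot_count,
                       int entry_arguments_count,
                       int* arguments_index, int* arguments_count) {
  if (!arguments_pushed) return;
  *arguments_index = *arguments_index < 0
      ? stack_slot_count                       // first pushed-arguments frame
      : *arguments_index + *arguments_count;   // stack behind the previous area
  *arguments_count = entry_arguments_count + 1;  // +1: receiver (assumed)
}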
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject();
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -512,6 +557,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -519,8 +566,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
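The is_uint32 plumbing matters because one 32-bit pattern names two different JS numbers depending on signedness, and the deoptimizer must be told which reading to materialize. A self-contained demonstration:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t raw = 0x90000000u;                   // JS number 2415919104
  int32_t as_int = static_cast<int32_t>(raw);   // -1879048192 if misread
  std::printf("%u vs %d\n", raw, as_int);
  return 0;
}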
@@ -587,20 +634,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
+ int args_index = 0;
+ int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
- WriteTranslation(environment, &translation);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, zone());
}
}
@@ -658,13 +707,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -679,7 +728,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -725,14 +774,14 @@ void LCodeGen::RecordSafepoint(
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ safepoint.DefinePointerRegister(cp, zone());
}
}
@@ -744,7 +793,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
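Several hunks in this file thread zone() into Add, DefinePointerSlot, and the Translation constructor: these containers now allocate from a compilation zone that is discarded wholesale, so individual frees never happen. A minimal bump-pointer arena in that spirit (illustrative only, not V8's Zone):

#include <cstddef>

class Arena {
 public:
  void* Allocate(std::size_t size) {
    size = (size + 7) & ~std::size_t{7};                 // 8-byte align
    if (used_ + size > sizeof(buffer_)) return nullptr;  // a real zone grows
    void* p = buffer_ + used_;
    used_ += size;
    return p;
  }
 private:
  alignas(8) char buffer_[1 << 16];
  std::size_t used_ = 0;
};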
@@ -863,7 +912,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
Register scratch = scratch0();
- const Register left = ToRegister(instr->InputAt(0));
+ const Register left = ToRegister(instr->left());
const Register result = ToRegister(instr->result());
Label done;
@@ -891,7 +940,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ And(result, scratch, p2constant - 1);
} else {
// div runs in the background while we check for special cases.
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ Register right = EmitLoadRegister(instr->right(), scratch);
__ div(left, right);
// Check for x % 0.
@@ -911,8 +960,8 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- const Register left = ToRegister(instr->InputAt(0));
- const Register right = ToRegister(instr->InputAt(1));
+ const Register left = ToRegister(instr->left());
+ const Register right = ToRegister(instr->right());
const Register result = ToRegister(instr->result());
// On MIPS div is asynchronous - it will run in the background while we
@@ -950,8 +999,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
@@ -1021,7 +1070,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
Register right = EmitLoadRegister(right_op, scratch);
if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->TempAt(0)), left, right);
+ __ Or(ToRegister(instr->temp()), left, right);
}
if (can_overflow) {
@@ -1041,7 +1090,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
DeoptimizeIf(lt,
instr->environment(),
- ToRegister(instr->TempAt(0)),
+ ToRegister(instr->temp()),
Operand(zero_reg));
__ bind(&done);
}
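The Or into the temp register records whether either factor was negative; if the product is zero and that sign information is set, the exact JS result would be -0, which int32 cannot represent, hence the deopt. In scalar form (hedged; assumes the overflow case was ruled out earlier, so a zero product implies a zero factor):

#include <cstdint>

// True when an int32 multiply lowered from a double op must yield -0.
bool NeedsMinusZeroDeopt(int32_t left, int32_t right) {
  return (left == 0 || right == 0) && ((left | right) < 0);
}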
@@ -1050,8 +1099,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->InputAt(0);
- LOperand* right_op = instr->InputAt(1);
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
ASSERT(left_op->IsRegister());
Register left = ToRegister(left_op);
Register result = ToRegister(instr->result());
@@ -1084,8 +1133,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
// Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
// result may alias either of them.
- LOperand* right_op = instr->InputAt(1);
- Register left = ToRegister(instr->InputAt(0));
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
if (right_op->IsRegister()) {
@@ -1147,8 +1196,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
@@ -1212,21 +1261,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -1239,9 +1295,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
Label done;
// If the object is a smi return the object.
@@ -1258,9 +1314,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(a0));
@@ -1268,12 +1324,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ And(at, object, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- __ Assert(eq, "Trying to get date field from non-date.",
- scratch, Operand(JS_DATE_TYPE));
-#endif
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1298,14 +1352,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ Nor(result, zero_reg, Operand(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
CallRuntime(Runtime::kThrow, 1, instr);
@@ -1316,8 +1370,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
@@ -1354,9 +1408,71 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register left_reg = ToRegister(left);
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+ ? ToOperand(right)
+ : Operand(EmitLoadRegister(right, at));
+ Register result_reg = ToRegister(instr->result());
+ Label return_right, done;
+ if (!result_reg.is(left_reg)) {
+ __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
+ __ mov(result_reg, left_reg);
+ __ Branch(&done);
+ }
+ __ Branch(&done, condition, left_reg, right_op);
+ __ bind(&return_right);
+ __ Addu(result_reg, zero_reg, right_op);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPURegister left_reg = ToDoubleRegister(left);
+ FPURegister right_reg = ToDoubleRegister(right);
+ FPURegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+ __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+ __ Branch(&return_right);
+
+ __ bind(&check_zero);
+ // left == right != 0.
+ __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ neg_d(left_reg, left_reg);
+ __ sub_d(result_reg, left_reg, right_reg);
+ __ neg_d(result_reg, result_reg);
+ } else {
+ __ add_d(result_reg, left_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&check_nan_left);
+ // left == NaN.
+ __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ mov_d(result_reg, right_reg);
+ }
+ __ Branch(&done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ mov_d(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
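Two corners of DoMathMinMax reward a scalar restatement: operands that compare equal may still be a +0/-0 pair, and plain selection would lose the sign. The neg_d/sub_d/neg_d (min) and add_d (max) sequences settle it through IEEE-754 addition, and they are reached only once both operands are known to be some zero:

// Scalar equivalent of the check_zero block; call only with a, b in {+0, -0}.
double MinOfZeros(double a, double b) { return -(-a - b); }  // -0 if either is -0
double MaxOfZeros(double a, double b) { return a + b; }      // +0 unless both are -0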
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
@@ -1396,8 +1512,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
- ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1461,15 +1577,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -1603,8 +1719,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1655,8 +1771,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1665,7 +1781,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1677,7 +1793,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -1743,8 +1859,8 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
Register temp2 = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1771,8 +1887,8 @@ Condition LCodeGen::EmitIsString(Register input,
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1790,15 +1906,15 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+ Register input_reg = EmitLoadRegister(instr->value(), at);
__ And(at, input_reg, kSmiTagMask);
EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1867,7 +1983,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1886,12 +2002,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AssertString(input);
__ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@@ -1900,7 +2014,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1980,9 +2094,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register temp = scratch0();
- Register temp2 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->temp());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1998,8 +2112,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -2010,8 +2124,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
Label true_label, done;
- ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
- ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
+ ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
+ ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
Register result = ToRegister(instr->result());
ASSERT(result.is(v0));
@@ -2045,11 +2159,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
ASSERT(object.is(a0));
@@ -2124,7 +2238,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
  // Get the temp register reserved by the instruction. This needs to be t0,
  // as its slot in the pushed safepoint register area is used to communicate
  // the offset to the location of the map check.
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
ASSERT(temp.is(t0));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
@@ -2219,7 +2333,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload.
- Register payload = ToRegister(instr->TempAt(0));
+ Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
@@ -2302,7 +2416,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2316,12 +2430,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2333,9 +2447,22 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
+ DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
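The new negative-lookup branch compiles the absence of a property: it pins the map of every prototype on the chain and deoptimizes if any has changed, after which undefined is the provably correct result. A conceptual, compilable sketch of the invariant being checked (types invented for illustration):

// Would-deopt check: every prototype's map must equal the map recorded
// at compile time for the negative result to remain valid.
struct Obj { const void* map; const Obj* prototype; };

bool PrototypeMapsUnchanged(const Obj* proto,
                            const void* const* recorded_maps, int n) {
  for (int i = 0; i < n; ++i, proto = proto->prototype) {
    if (proto->map != recorded_maps[i]) return false;  // deoptimize
  }
  return true;
}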
@@ -2343,7 +2470,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
+ Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
@@ -2354,17 +2481,25 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
}
Handle<String> name = instr->hydrogen()->name();
Label done;
- __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label check_passed;
+ __ CompareMapAndBranch(
+ object_map, map, &check_passed,
+ eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ Branch(&next, ne, scratch, Operand(map));
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ Branch(&next);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
__ Branch(&done);
__ bind(&next);
}
@@ -2434,7 +2569,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
Register scratch = scratch0();
__ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
@@ -2449,8 +2584,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ Ext(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
- __ Branch(&done, eq, scratch,
- Operand(FAST_ELEMENTS));
+ __ Branch(&fail, lt, scratch,
+ Operand(GetInitialFastElementsKind()));
+ __ Branch(&done, le, scratch,
+ Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ Branch(&fail, lt, scratch,
Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ Branch(&done, le, scratch,
@@ -2465,7 +2602,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->InputAt(0));
+ Register from_reg = ToRegister(instr->object());
__ lw(to_reg, FieldMemOperand(from_reg,
ExternalArray::kExternalPointerOffset));
}
@@ -2476,14 +2613,6 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
-
- // Bailout index is not a valid argument index. Use unsigned check to get
- // negative check for free.
-
- // TODO(plind): Shoud be optimized to do the sub before the DeoptimizeIf(),
- // as they do in Arm. It will save us an instruction.
- DeoptimizeIf(ls, instr->environment(), length, Operand(index));
-
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them, add one more.
__ subu(length, length, index);
@@ -2494,65 +2623,8 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = EmitLoadRegister(instr->key(), scratch0());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Load the result.
- __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
- __ addu(scratch, elements, scratch);
- __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- if (key_is_constant) {
- __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, Operand(scratch));
- __ Addu(elements, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
-
- __ ldc1(result, MemOperand(elements));
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -2565,36 +2637,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0()));
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0()));
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
}
} else {
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- MemOperand mem_operand(zero_reg);
- if (key_is_constant) {
- mem_operand = MemOperand(external_pointer,
- constant_key * (1 << shift_size));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ lb(result, mem_operand);
@@ -2614,17 +2683,19 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ lw(result, mem_operand);
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2634,6 +2705,145 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ if (key_is_constant) {
+ __ Addu(elements, elements,
+ Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(elements, elements, Operand(scratch));
+ __ Addu(elements, elements,
+ Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ (instr->additional_index() << element_size_shift)));
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
+
+ __ ldc1(result, MemOperand(elements));
+}
+
+
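Holes in a FixedDoubleArray are a reserved NaN payload, and every hole shares the same upper word, so the load at offset sizeof(kHoleNanLower32) (the high word on this little-endian target) plus one compare suffices. A sketch, with the payload left as a parameter rather than quoting V8's constant:

#include <cstdint>
#include <cstring>

// Hole test against the upper 32 bits of the IEEE-754 encoding.
bool IsHoleNan(double d, uint32_t hole_nan_upper32 /* V8's kHoleNanUpper32 */) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}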
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ lw(result, FieldMemOperand(store_base, offset));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ And(scratch, result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ }
+ }
+}
+
+
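The tagged/untagged split in DoLoadKeyedFixedArray recurs in every keyed path: a smi key already carries a one-bit left shift, so it needs one shift less than a raw int32 to become a byte offset into 4-byte slots:

// Shift that turns a key into a FixedArray byte offset on 32-bit MIPS.
int KeyShift(bool key_is_tagged) {
  const int kPointerSizeLog2 = 2;
  const int kSmiTagSize = 1;
  return key_is_tagged ? kPointerSizeLog2 - kSmiTagSize  // smi: << 1
                       : kPointerSizeLog2;               // int32: << 2
}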
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ Addu(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), key, 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+ }
+
+ if (shift_size >= 0) {
+ __ sll(scratch0(), scratch0(), shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), scratch0(), 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+}
+
+
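PrepareKeyedOperand tolerates shift_size == -1, which falls out for one-byte external elements (element_size_shift == 0) addressed by a tagged key (minus kSmiTagSize); there the srl by 1 untags the key instead of scaling it. Scalar form (hedged):

#include <cstdint>

// Scale a key into a byte offset; shift_size == -1 means byte elements with
// a tagged smi key, where untagging (>> 1) replaces the left shift.
uint32_t ScaleKey(uint32_t key, int shift_size) {
  return shift_size >= 0 ? (key << shift_size) : (key >> 1);
}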
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -2666,7 +2876,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->InputAt(0));
+ Register elem = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Label done;
@@ -2784,7 +2994,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
Abort("DoPushArgument not implemented for double type.");
} else {
@@ -2801,7 +3011,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2830,12 +3040,14 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
+ __ lw(result, ContextOperand(cp, instr->qml_global()
+ ? Context::QML_GLOBAL_OBJECT_INDEX
+ : Context::GLOBAL_OBJECT_INDEX));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
+ Register global = ToRegister(instr->global_object());
Register result = ToRegister(instr->result());
__ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}
@@ -2857,14 +3069,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(a1, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  // Set a0 to the arguments count if adaptation is not needed. Assumes
  // that a0 is available to write to at this point.
@@ -2902,7 +3108,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -2967,7 +3173,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label done;
@@ -2998,7 +3204,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- FPURegister input = ToDoubleRegister(instr->InputAt(0));
+ FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
} else if (r.IsInteger32()) {
@@ -3006,8 +3212,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->InputAt(0));
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
@@ -3018,11 +3224,11 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
- Register except_flag = ToRegister(instr->TempAt(0));
+ Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
single_scratch,
@@ -3049,7 +3255,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3126,16 +3332,16 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp = ToDoubleRegister(instr->temp());
ASSERT(!input.is(result));
@@ -3160,11 +3366,11 @@ void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(f4));
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(a2));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(f4));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(a2));
+ ASSERT(ToDoubleRegister(instr->left()).is(f2));
ASSERT(ToDoubleRegister(instr->result()).is(f0));
if (exponent_type.IsTagged()) {
@@ -3197,20 +3403,20 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
  // Having marked this instruction as a call, we can use any
  // registers.
ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->global_object()).is(a0));
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the global context's random seeds
+ // a2: FixedArray of the native context's random seeds
// Load state[0].
__ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
@@ -3411,7 +3617,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+ ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3436,6 +3642,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->transition().is_null()) {
__ li(scratch, Operand(instr->transition()));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ scratch,
+ temp,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
}
// Do the store.
@@ -3487,101 +3705,52 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptimizeIf(hs,
- instr->environment(),
- ToRegister(instr->index()),
- Operand(ToRegister(instr->length())));
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset =
- ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
- __ sw(value, FieldMemOperand(elements, offset));
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
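+  // A smi has tag bit zero, so a non-zero result from masking with
+  // kSmiTagMask means a heap object reached a slot that expects an
+  // untagged index, and we must deoptimize.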
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ if (operand->IsRegister()) {
+ __ And(at, ToRegister(operand), Operand(kSmiTagMask));
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
+ } else {
+ __ li(at, ToOperand(operand));
+ __ And(at, at, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
+ }
}
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
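+    // If the length is tagged (a smi), materialize the constant index as
+    // a smi as well so the unsigned comparison below compares like with
+    // like.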
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ li(at, Operand(Smi::FromInt(constant_index)));
+ } else {
+ __ li(at, Operand(constant_index));
}
+ DeoptimizeIf(hs,
+ instr->environment(),
+ at,
+ Operand(ToRegister(instr->length())));
} else {
- key = ToRegister(instr->key());
- }
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ DeoptimizeIf(hs,
+ instr->environment(),
+ ToRegister(instr->index()),
+ Operand(ToRegister(instr->length())));
}
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch));
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
-
- Register external_pointer = ToRegister(instr->external_pointer());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3594,13 +3763,17 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+ __ Addu(scratch0(), external_pointer, constant_key <<
+ element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -3608,22 +3781,16 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0()));
+ __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0()));
+ __ sdc1(value, MemOperand(scratch0(), additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
- MemOperand mem_operand(zero_reg);
- Register scratch = scratch0();
- if (key_is_constant) {
- mem_operand = MemOperand(external_pointer,
- constant_key * (1 << shift_size));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3642,7 +3809,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3651,6 +3821,117 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
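+    // NaN is the only value that compares unordered with itself, so the
+    // NaN branch of BranchF is taken exactly when value is NaN.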
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+    // Only load the canonical NaN if the branch above detected a NaN.
+ __ bind(&is_nan);
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ }
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+ : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the
+    // bounds check, which can be tagged, so that case must be handled
+    // here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ sw(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
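+    // A smi store never needs a write barrier, so the barrier's inline
+    // smi check is omitted when the value is statically known to be a
+    // heap object.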
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch on the elements kind: external array, fixed double array,
+  // or fixed (tagged) array.
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
@@ -3665,7 +3946,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
@@ -3680,21 +3961,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
- if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
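+    // A smi-to-object transition only needs the new map installed: the
+    // existing smi elements are already valid tagged values.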
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
- } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
@@ -3727,7 +4009,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
ToRegister(instr->index()),
@@ -3761,9 +4043,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(v0);
- }
+ __ AssertSmi(v0);
__ SmiUntag(v0);
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -3781,7 +4061,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3819,14 +4099,14 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->InputAt(0));
+ Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
__ lw(result, FieldMemOperand(string, String::kLengthOffset));
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -3842,47 +4122,95 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+
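+  // mtc1 copies the raw 32 bits into an FPU register; Cvt_d_uw then
+  // converts them as an unsigned integer (f22 serves as FPU scratch).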
+ FPURegister dbl_scratch = double_scratch0();
+ __ mtc1(ToRegister(input), dbl_scratch);
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->value(),
+ SIGNED_INT32);
+ }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->value());
Register dst = ToRegister(instr->result());
Register overflow = scratch0();
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTagCheckOverflow(dst, src, overflow);
__ BranchOnOverflow(deferred->entry(), overflow);
__ bind(deferred->exit());
}
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagI(instr_,
+ instr_->value(),
+ UNSIGNED_INT32);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
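+  // Unsigned values above Smi::kMaxValue (2^30 - 1 on this 32-bit target)
+  // cannot be represented as a smi and are deferred to heap-number
+  // allocation.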
+ __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
+ __ SmiTag(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness) {
Label slow;
- Register src = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
FPURegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
Label done;
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ Xor(src, src, Operand(0x80000000));
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
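+    // When dst aliases src, the original value is recovered from the
+    // overflowed tag: dst holds src << 1 (mod 2^32), the arithmetic-shift
+    // untag restores every bit except bit 31, and overflow implies bit 31
+    // was the complement of bit 30, so the XOR with 0x80000000 fixes it.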
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ Xor(src, src, Operand(0x80000000));
+ }
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ } else {
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
}
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
+
if (FLAG_inline_new) {
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t1, a3, t0, t2, &slow);
@@ -3919,13 +4247,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@@ -3952,13 +4280,13 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
@@ -4023,9 +4351,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
FPURegister single_scratch = double_scratch.low();
@@ -4042,8 +4370,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+ Register scratch3 = ToRegister(instr->temp2());
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4113,13 +4441,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Let the deferred code handle the HeapObject case.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4131,7 +4459,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -4149,12 +4477,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
- DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->TempAt(1));
+ Register scratch3 = ToRegister(instr->temp2());
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4181,21 +4509,21 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ And(at, ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
__ And(at, ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register scratch = scratch0();
__ GetObjectType(input, scratch, scratch);
@@ -4265,7 +4593,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
@@ -4284,7 +4612,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
@@ -4300,7 +4628,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4332,8 +4660,9 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4370,11 +4699,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
- Register scratch2 = ToRegister(instr->TempAt(1));
+ Register scratch = ToRegister(instr->temp());
+ Register scratch2 = ToRegister(instr->temp2());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4442,14 +4772,15 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4462,12 +4793,13 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
a2,
Operand(boilerplate_elements_kind));
}
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(a3, literals);
__ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
- __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+ __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
@@ -4562,8 +4894,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
    // We only support little-endian mode...
- int32_t value_low = value & 0xFFFFFFFF;
- int32_t value_high = value >> 32;
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ li(a2, Operand(value_low));
@@ -4602,10 +4934,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+  // Deopt if the literal boilerplate ElementsKind is of a type different
+  // than the expected one. The check isn't necessary if the boilerplate
+  // has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4667,7 +5000,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+ ASSERT(ToRegister(instr->value()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
__ push(a0);
CallRuntime(Runtime::kToFastProperties, 1, instr);
@@ -4677,15 +5010,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // a3 = JS function.
// t3 = literals array.
// a1 = regexp literal.
// a0 = regexp literal clone.
// a2 and t0-t2 are used as temporaries.
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(t3, instr->hydrogen()->literals());
__ lw(a1, FieldMemOperand(t3, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, a1, Operand(at));
@@ -4751,14 +5082,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -4885,7 +5216,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp1 = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -5013,7 +5344,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt();
@@ -5084,12 +5415,21 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
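+  // An EnumLength of zero means the map has no enumerable own properties;
+  // in that case hand back the empty fixed array instead of a cache.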
+ __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+ __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ jmp(&done);
+
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ lw(result,
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+
+ __ bind(&done);
}
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
index 94bb945..7363eb8 100644
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
@@ -44,21 +44,24 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- deopt_jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- deferred_(8),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -71,6 +74,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@@ -106,7 +110,12 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagI(LInstruction* instr,
+ LOperand* value,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -124,8 +133,20 @@ class LCodeGen BASE_EMBEDDED {
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void WriteTranslation(LEnvironment* environment,
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -149,7 +170,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -170,10 +191,10 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -239,7 +260,10 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -294,6 +318,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -329,7 +357,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@@ -347,8 +376,15 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
-
- LChunk* const chunk_;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
index 4a5fbe3..87efae5 100644
--- a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -35,7 +35,7 @@ namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
- moves_(32),
+ moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
@@ -80,7 +80,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.cc b/src/3rdparty/v8/src/mips/lithium-mips.cc
index eab5945..7b71758 100644
--- a/src/3rdparty/v8/src/mips/lithium-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-mips.cc
@@ -194,22 +194,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -218,57 +218,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -278,7 +278,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -292,26 +292,26 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -340,17 +340,15 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
-
stream->Add(" length ");
length()->PrintTo(stream);
-
stream->Add(" index ");
index()->PrintTo(stream);
}
@@ -374,16 +372,7 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -407,146 +396,26 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot so that a double-width value occupies two consecutive
  // spill slots.
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
- } else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
+ return LStackSlot::Create(index, zone());
}
}
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -561,17 +430,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -740,7 +600,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -762,7 +622,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -835,13 +695,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
  // Shift operations can only deoptimize if we do a logical shift
  // right by zero and the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
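+    // SHR by zero reinterprets the operand as uint32; a result with the
+    // top bit set has no int32 representation, so we must deoptimize
+    // unless the value is known uint32 or every use truncates to int32.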
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
@@ -974,8 +837,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -985,7 +848,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->parameter_count(),
argument_count_,
value_count,
- outer);
+ outer,
+ hydrogen_env->entry(),
+ zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -999,7 +864,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1415,6 +1282,25 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
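
DoMathMinMax asks for the "most constant" operand on the right in the integer
case so the backend can fold it in as an immediate; min and max are symmetric,
so commuting the operands is safe. A small stand-in for that ordering rule
(LeastConstantOperand and MostConstantOperand are the names used above;
everything else below is hypothetical):

    #include <cstdio>

    struct Operand { bool is_constant; int value; };

    // If exactly one operand is a constant, route it to the immediate
    // slot; the operation is commutative, so the swap is sound.
    void OrderForImmediate(Operand a, Operand b, Operand* reg, Operand* imm) {
      if (a.is_constant && !b.is_constant) {
        *reg = b;
        *imm = a;
      } else {
        *reg = a;
        *imm = b;
      }
    }

    int main() {
      Operand x = {false, 0}, five = {true, 5};
      Operand reg, imm;
      OrderForImmediate(five, x, &reg, &imm);
      std::printf("immediate slot holds a constant: %d\n", imm.is_constant);
    }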
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1580,6 +1466,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1595,13 +1487,14 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result = new LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr);
+ LDateField* result =
+ new(zone()) LDateField(object, FixedTemp(a1), instr->index());
+ return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterAtStart(instr->index());
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@@ -1644,10 +1537,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegisterAtStart(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
- if (!needs_check) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+ if (instr->value()->type().IsSmi()) {
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -1686,7 +1578,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LNumberTagU* result = new(zone()) LNumberTagU(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -1694,8 +1589,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else {
ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(Use(instr->value())));
+ }
}
}
UNREACHABLE();
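
LUint32ToDouble is split from LInteger32ToDouble because the same 32-bit
pattern converts to different doubles depending on signedness. A plain C++
illustration (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t bits = 0x80000000u;  // a kUint32-flagged value
      double as_int32 = static_cast<double>(static_cast<int32_t>(bits));
      double as_uint32 = static_cast<double>(bits);
      // Prints -2147483648.000000 vs 2147483648.000000.
      std::printf("%f vs %f\n", as_int32, as_uint32);
    }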
@@ -1717,10 +1617,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp1 = TempRegister();
+ LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(result);
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(Define(result, temp1));
}
@@ -1889,51 +1789,40 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* elements = UseTempRegister(instr->elements());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseTempRegister(instr->elements());
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
- ASSERT(
- (representation.IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
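
The three keyed-load builders collapse into one that derives the operand policy
from the hydrogen instruction instead of from the class name. A condensed
decision table mirroring the branches above (the Use* names refer to the
LChunkBuilder helpers; this enum is a stand-in):

    enum UsePolicy { kUseRegister, kUseRegisterAtStart, kUseTempRegister };

    // Mirrors DoLoadKeyed: external arrays use a plain register for the
    // backing-store pointer; fast double elements need a temp register;
    // fast tagged elements can share a register at start.
    UsePolicy ElementsPolicy(bool is_external, bool is_double) {
      if (is_external) return kUseRegister;
      return is_double ? kUseTempRegister : kUseRegisterAtStart;
    }

    int main() {
      return ElementsPolicy(false, true) == kUseTempRegister ? 0 : 1;
    }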
@@ -1947,64 +1836,47 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (representation.IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
+ LOperand* val = val_is_temp_register || needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
+ LStoreKeyed* result = NULL;
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+
+ LOperand* object = NULL;
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ } else {
+ ASSERT(instr->value()->representation().IsTagged());
+ object = UseTempRegister(instr->elements());
+ }
+
+ result = new(zone()) LStoreKeyed(object, key, val);
+ } else {
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LStoreKeyed(external_pointer, key, val);
+ }
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ ASSERT(result != NULL);
+ return result;
}
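
The needs_write_barrier checks force the key and value into temp registers
because the record-write path clobbers its operands. For context, a
textbook-style write barrier looks like the sketch below; this is the general
idea, not V8's RecordWrite:

    #include <vector>

    // Remember stores that create an old-to-young pointer so the next
    // scavenge can treat the slot as a root.
    void StoreWithBarrier(void** slot, void* value,
                          bool holder_is_old, bool value_is_young,
                          std::vector<void**>* remembered_set) {
      *slot = value;
      if (holder_is_old && value_is_young) remembered_set->push_back(slot);
    }

    int main() {
      std::vector<void**> remembered_set;
      void* slot = nullptr;
      StoreWithBarrier(&slot, nullptr, true, true, &remembered_set);
      return remembered_set.size() == 1 ? 0 : 1;
    }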
@@ -2023,8 +1895,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ ElementsKind from_kind = instr->original_map()->elements_kind();
+ ElementsKind to_kind = instr->transitioned_map()->elements_kind();
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
@@ -2045,16 +1918,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = instr->is_in_object()
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- return new(zone()) LStoreNamedField(obj, val);
+ // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2097,8 +1982,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new(zone()) LAllocateObject(
- TempRegister(), TempRegister());
+ LAllocateObject* result =
+ new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
@@ -2137,6 +2022,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2175,12 +2061,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2234,7 +2118,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@@ -2260,10 +2144,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2275,7 +2160,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* env = current_block_->last_environment();
- if (instr->arguments_pushed()) {
+ if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
@@ -2306,8 +2191,7 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
}
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.h b/src/3rdparty/v8/src/mips/lithium-mips.h
index 62c5398..00e21fd 100644
--- a/src/3rdparty/v8/src/mips/lithium-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-mips.h
@@ -108,6 +108,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +116,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -125,17 +125,18 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -153,15 +154,14 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -255,11 +255,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -268,6 +263,15 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator interface.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -288,16 +292,17 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
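
Making InputCount/InputAt and TempCount/TempAt private with friend iterators
keeps ordinary code on the named accessors (left(), right(), temp()) while the
register allocator still walks operands generically. A minimal, compilable
sketch of the pattern, assuming nothing beyond standard C++:

    #include <cstdio>

    class Instr;

    class TempIterator {
     public:
      explicit TempIterator(Instr* instr) : instr_(instr), current_(0) {}
      bool Done() const;
      int Current() const;
      void Advance() { ++current_; }
     private:
      Instr* instr_;
      int current_;
    };

    class Instr {
     public:
      virtual ~Instr() {}
     private:
      // Only the iterator sees the raw positional interface.
      friend class TempIterator;
      virtual int TempCount() const = 0;
      virtual int TempAt(int i) const = 0;
    };

    class AddInstr : public Instr {
     private:
      virtual int TempCount() const { return 2; }
      virtual int TempAt(int i) const { return 100 + i; }
    };

    bool TempIterator::Done() const { return current_ >= instr_->TempCount(); }
    int TempIterator::Current() const { return instr_->TempAt(current_); }

    int main() {
      AddInstr add;
      for (TempIterator it(&add); !it.Done(); it.Advance())
        std::printf("temp %d\n", it.Current());
    }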
@@ -332,8 +337,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
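
GetOrCreateParallelMove now takes a Zone so the lazily created move is
placement-new'd into the compilation arena instead of the global heap;
everything allocated there is released at once when compilation finishes. A toy
zone showing the new(zone) idiom (fixed-size and simplified; the real Zone
grows in segments):

    #include <cstddef>
    #include <cstdlib>

    class Zone {
     public:
      void* Allocate(std::size_t size) {
        size = (size + 7) & ~static_cast<std::size_t>(7);  // 8-byte alignment
        if (top_ + size > sizeof(buffer_)) std::abort();   // toy: no growth
        void* result = buffer_ + top_;
        top_ += size;
        return result;
      }
     private:
      alignas(8) char buffer_[1 << 16];
      std::size_t top_ = 0;
    };

    void* operator new(std::size_t size, Zone* zone) {
      return zone->Allocate(size);
    }

    struct ParallelMove {
      explicit ParallelMove(Zone* zone) { (void)zone; }
    };

    int main() {
      Zone zone;
      ParallelMove* move = new (&zone) ParallelMove(&zone);
      (void)move;  // no delete: the zone owns the storage
    }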
@@ -513,6 +520,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -539,16 +548,22 @@ class LModI: public LTemplateInstruction<1, 2, 3> {
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -561,6 +576,9 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -574,6 +592,10 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -586,6 +608,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
@@ -605,6 +630,9 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -620,6 +648,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
@@ -632,6 +663,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -644,6 +677,8 @@ class LIsNilAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -661,6 +696,9 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
@@ -675,6 +713,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
@@ -688,6 +729,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -702,6 +745,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
@@ -717,6 +763,9 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -733,6 +782,8 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -747,6 +798,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -758,6 +811,8 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
@@ -773,6 +828,9 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -788,6 +846,9 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -802,6 +863,9 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -813,6 +877,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -841,6 +908,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -851,6 +919,9 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -867,7 +938,8 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -885,6 +957,9 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -923,6 +998,8 @@ class LBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -937,6 +1014,9 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -958,6 +1038,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -969,18 +1051,34 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -993,6 +1091,9 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1005,9 +1106,12 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Smi* index() const { return index_; }
+
DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
DECLARE_HYDROGEN_ACCESSOR(DateField)
- Smi* index() const { return index_; }
private:
Smi* index_;
@@ -1020,6 +1124,8 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1030,6 +1136,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1041,11 +1149,29 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1053,6 +1179,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1064,6 +1193,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1078,6 +1209,8 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
@@ -1096,12 +1229,14 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ Token::Value op() const { return op_; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
- Token::Value op() const { return op_; }
-
private:
Token::Value op_;
};
@@ -1113,6 +1248,8 @@ class LReturn: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1123,6 +1260,8 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
@@ -1134,10 +1273,10 @@ class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
                             "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
};
@@ -1147,10 +1286,11 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1161,10 +1301,10 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
};
@@ -1174,6 +1314,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1184,73 +1326,47 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LLoadKeyedGeneric(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1267,10 +1383,11 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1283,10 +1400,11 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1298,12 +1416,13 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* global_object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1314,10 +1433,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1331,11 +1451,12 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1348,6 +1469,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1384,9 +1507,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1404,10 +1527,12 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
qml_global_ = qml_global;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
- LOperand* context() { return InputAt(0); }
bool qml_global() { return qml_global_; }
+
private:
bool qml_global_;
};
@@ -1419,9 +1544,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global_object() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1443,11 +1568,11 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1461,6 +1586,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = key;
}
+ LOperand* key() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1489,10 +1616,11 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* function() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1533,6 +1661,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = constructor;
}
+ LOperand* constructor() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1558,28 +1688,60 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberTagU(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1587,12 +1749,16 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
- LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1604,15 +1770,20 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
LTaggedToI(LOperand* value,
- LOperand* temp1,
+ LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = value;
- temps_[0] = temp1;
+ temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1626,6 +1797,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1636,6 +1809,8 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1648,30 +1823,33 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1681,106 +1859,67 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedGeneric(LOperand* object, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
+ bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
+ StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1788,21 +1927,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1815,11 +1955,11 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1831,11 +1971,11 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1845,10 +1985,10 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = char_code;
}
+ LOperand* char_code() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
@@ -1858,10 +1998,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1871,7 +2011,7 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
@@ -1884,6 +2024,8 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1895,18 +2037,23 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -1921,6 +2068,8 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
@@ -1931,18 +2080,21 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
@@ -1950,8 +2102,8 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1962,12 +2114,13 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -1975,11 +2128,14 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
- LAllocateObject(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
+ LAllocateObject(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
temps_[1] = temp2;
}
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2028,6 +2184,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2039,6 +2197,8 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2049,6 +2209,8 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2064,6 +2226,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
@@ -2071,15 +2235,15 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
+ LDeleteProperty(LOperand* object, LOperand* key) {
+ inputs_[0] = object;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
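
The pattern running through these lithium-arm.h hunks: every instruction keeps its operands in the fixed-size inputs_ and temps_ arrays of LTemplateInstruction, and the diff adds one named getter per slot so the code generator can write instr->value() instead of indexing the arrays directly. A minimal C++ sketch of the shape (names here are illustrative, not the real V8 declarations):

    // One-input instruction exposing its operand through a named accessor.
    class LCheckSmiSketch {
     public:
      explicit LCheckSmiSketch(void* value) { inputs_[0] = value; }
      void* value() { return inputs_[0]; }  // the kind of getter this diff adds
     private:
      void* inputs_[1];  // stands in for LTemplateInstruction's inputs_
    };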
@@ -2189,63 +2353,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2255,7 +2369,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->isolate()->zone()),
+ zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2264,10 +2378,10 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
@@ -2282,7 +2396,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2292,7 +2406,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2383,7 +2497,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2395,7 +2509,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
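
The bigger change in this file is structural: the bookkeeping every backend duplicated in its own LChunk (instruction list, gap moves, labels, inlined closures) moves into a shared LChunk base class, and the ARM file keeps only a thin LPlatformChunk holding the platform-specific spill-slot policy. A hedged sketch of the split; the pairing rule for double-width slots is an assumption for illustration:

    #include <cassert>

    class LChunkSketch {              // stands in for the shared LChunk
     public:
      int spill_slot_count() const { return spill_slot_count_; }
     protected:
      int spill_slot_count_ = 0;
    };

    class LPlatformChunkSketch : public LChunkSketch {
     public:
      // Assumption: a double spill slot occupies two consecutive word slots.
      int GetNextSpillIndex(bool is_double) {
        if (is_double) spill_slot_count_++;
        return spill_slot_count_++;
      }
    };

    int main() {
      LPlatformChunkSketch chunk;
      chunk.GetNextSpillIndex(false);  // word slot 0
      chunk.GetNextSpillIndex(true);   // skips slot 1, returns slot 2
      assert(chunk.spill_slot_count() == 3);
    }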
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
index 2c2445b..aebfe73 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
@@ -361,28 +361,29 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
scratch, Operand(zero_reg));
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
Branch(&same_contexts, eq, scratch, Operand(at));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
@@ -392,13 +393,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
// Restoring at is not needed; at is reloaded below.
pop(holder_reg); // Restore holder.
// Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -2559,7 +2560,7 @@ void MacroAssembler::Call(Address target,
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
@@ -2571,7 +2572,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
@@ -2580,7 +2581,7 @@ void MacroAssembler::Call(Handle<Code> code,
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
@@ -3341,33 +3342,39 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastElementValue));
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}
@@ -3438,7 +3445,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
destination = FloatingPointHelper::kCoreRegisters;
}
- Register untagged_value = receiver_reg;
+ Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
@@ -3467,28 +3474,33 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
Label* branch_to,
CompareMapMode mode) {
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
+}
+
+
+void MacroAssembler::CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode) {
Operand right = Operand(map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- Branch(early_success, eq, scratch, right);
- right = Operand(Handle<Map>(transitioned_fast_element_map));
- }
-
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- Branch(early_success, eq, scratch, right);
- right = Operand(Handle<Map>(transitioned_double_map));
+ ElementsKind kind = map->elements_kind();
+ if (IsFastElementsKind(kind)) {
+ bool packed = IsFastPackedElementsKind(kind);
+ Map* current_map = *map;
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ current_map = current_map->LookupElementsTransitionMap(kind);
+ if (!current_map) break;
+ Branch(early_success, eq, obj_map, right);
+ right = Operand(Handle<Map>(current_map));
+ }
}
}
- Branch(branch_to, cond, scratch, right);
+ Branch(branch_to, cond, obj_map, right);
}
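
The rewritten ALLOW_ELEMENT_TRANSITION_MAPS path above no longer hard-codes two transition probes; it walks every strictly more general fast elements kind and compares the object's map against each transitioned map in turn. A sketch of that walk, restricted to the four kinds named in the STATIC_ASSERTs earlier in this file (the real lattice also includes the double kinds):

    #include <cstdio>

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3
    };

    // Assumed ordering: smi-only generalizes to object, packed to holey.
    static bool NextMoreGeneral(ElementsKind* kind, bool packed) {
      switch (*kind) {
        case FAST_SMI_ELEMENTS:
          *kind = packed ? FAST_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
          return true;
        case FAST_HOLEY_SMI_ELEMENTS:
        case FAST_ELEMENTS:
          *kind = FAST_HOLEY_ELEMENTS;
          return true;
        default:
          return false;  // FAST_HOLEY_ELEMENTS is already the most general
      }
    }

    int main() {
      ElementsKind kind = FAST_SMI_ELEMENTS;
      while (NextMoreGeneral(&kind, /*packed=*/true))
        std::printf("also accept the transitioned map for kind %d\n", kind);
    }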
@@ -3900,7 +3912,8 @@ void MacroAssembler::CallStub(CodeStub* stub,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
+ cond, r1, r2, bd);
}
@@ -4270,7 +4283,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
lw(target, FieldMemOperand(target,
@@ -4439,31 +4452,43 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ lw(scratch,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ lw(at, FieldMemOperand(scratch, offset));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ lw(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
+ Register function_in, Register scratch,
+ Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
lw(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
+ ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ kind,
+ map_out,
+ scratch,
+ &done);
+ } else if (can_have_holes) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@@ -4474,11 +4499,12 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
lw(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
lw(function, MemOperand(function, Context::SlotOffset(index)));
}
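
In the hunk above, LoadTransitionedArrayMapConditional stops reading one context slot per elements kind and instead indexes a single FixedArray of cached JSArray maps hanging off the native context (JS_ARRAY_MAPS_INDEX). The offset it computes reduces to the following (32-bit constants are assumptions; the real load uses FieldMemOperand, which also folds in the heap-object tag):

    #include <cstdio>

    static const int kPointerSize = 4;           // 32-bit MIPS assumption
    static const int kFixedArrayHeaderSize = 8;  // map word + length word

    static int ArrayMapByteOffset(int elements_kind) {
      return elements_kind * kPointerSize + kFixedArrayHeaderSize;
    }

    int main() {
      for (int kind = 0; kind < 4; ++kind)
        std::printf("kind %d -> offset %d\n", kind, ArrayMapByteOffset(kind));
      return 0;
    }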
@@ -4772,38 +4798,46 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, "Operand is a smi", at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(eq, "Operand is a smi", at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Assert(eq, message, src, Operand(at));
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ ASSERT(!src.is(at));
+ LoadRoot(at, root_value_index);
+ Check(eq, message, src, Operand(at));
+ }
}
@@ -5238,7 +5272,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
@@ -5269,54 +5303,54 @@ void MacroAssembler::EnsureNotWhite(
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- lw(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi);
- LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
- bind(&not_smi);
+ lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = t3;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(a1, a0);
- bind(&next);
+ Label next, start;
+ mov(a2, a0);
- // Check that there are no elements. Register a1 contains the
- // current JS object we've reached through the prototype chain.
- lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+ EnumLength(a3, a1);
+ Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in a2 for the subsequent
- // prototype load.
- lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(a3, call_runtime);
+ jmp(&start);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (a3). This is the case if the next enumeration
- // index field does not contain a smi.
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(a3, call_runtime);
+ bind(&next);
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- Branch(&check_prototype, eq, a1, Operand(a0));
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
+ EnumLength(a3, a1);
+ Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+
+ bind(&start);
+
+ // Check that there are no elements. Register a2 contains the current JS
+ // object we've reached through the prototype chain.
+ lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
- Branch(&next, ne, a1, Operand(null_value));
+ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Branch(&next, ne, a2, Operand(null_value));
}
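
The new CheckEnumCache no longer chases descriptor arrays: whether a for-in cache exists is read straight from each map's bit field 3 via EnumLength, comparing against Map::kInvalidEnumCache for the receiver and against zero for every prototype. A hedged model of that decision, with an invented field width:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kEnumLengthMask = 0x7ff;  // assumed field width
    static const uint32_t kInvalidEnumCache = kEnumLengthMask;

    static uint32_t EnumLength(uint32_t bit_field3) {
      return bit_field3 & kEnumLengthMask;  // EnumLengthBits::kShift == 0
    }

    int main() {
      // Receiver: an all-ones field means "no cache", so call the runtime.
      std::printf("%d\n", EnumLength(kEnumLengthMask) == kInvalidEnumCache);
      // Prototype: any non-zero enum length also forces the runtime path.
      std::printf("%d\n", EnumLength(3) != 0);
    }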
@@ -5357,7 +5391,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// In 0-255 range, round and truncate.
bind(&in_bounds);
- round_w_d(temp_double_reg, input_reg);
+ cvt_w_d(temp_double_reg, input_reg);
mfc1(result_reg, temp_double_reg);
bind(&done);
}
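
The AbortIf* -> Assert* renames above also change when the checks exist at all: each body is now wrapped in emit_debug_code(), so the instructions are emitted only under --debug-code rather than in every build. A plain C++ analogue of the pattern (the flag and helpers are stand-ins for the MacroAssembler ones):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    static bool emit_debug_code() { return true; }  // --debug-code stand-in

    static void Check(bool ok, const char* msg) {
      if (!ok) { std::fprintf(stderr, "%s\n", msg); std::abort(); }
    }

    static void AssertSmi(intptr_t object) {
      if (emit_debug_code()) {           // whole check skipped otherwise
        // kSmiTag == 0: a smi has a clear low bit.
        Check((object & 1) == 0, "Operand is not a smi");
      }
    }

    int main() { AssertSmi(42 << 1); }   // a smi-tagged 42 passes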
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.h b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
index b5897e4..8b7d7c1 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
@@ -108,12 +108,12 @@ inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
+ return ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX);
}
@@ -187,11 +187,11 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -811,8 +811,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -824,7 +824,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out);
+ Register map_out,
+ bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
@@ -966,9 +967,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail);
+ void CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
@@ -977,6 +978,7 @@ class MacroAssembler: public Assembler {
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
+ // All registers below here are overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
@@ -996,6 +998,15 @@ class MacroAssembler: public Assembler {
Label* branch_to,
CompareMapMode mode = REQUIRE_EXACT_MAP);
+ // As above, but the map of the object is already loaded into the given
+ // register, and it is preserved by the generated code.
+ void CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
@@ -1330,17 +1341,18 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
- void AbortIfNotSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
@@ -1392,7 +1404,16 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
-
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> shift) << kSmiTagSize;
+ srl(reg, reg, shift);
+ And(reg, reg, Operand(mask));
+ }
// Activation support.
void EnterFrame(StackFrame::Type type);
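
The DecodeField<Field> template added above extracts a field from a smi-tagged bit-field word: srl drops everything below Field::kShift, and the mask (the field mask shifted left by kSmiTagSize) keeps the field bits while leaving the result a valid smi. A self-checking C++ model with an invented 3-bit field at shift 4:

    #include <cassert>
    #include <cstdint>

    static const int kSmiTagSize = 1;    // 32-bit V8 smi tag size

    static uint32_t Decode(uint32_t reg, int shift, uint32_t mask_at_pos) {
      uint32_t mask = (mask_at_pos >> shift) << kSmiTagSize;
      reg >>= shift;                     // srl(reg, reg, shift)
      return reg & mask;                 // And(reg, reg, Operand(mask))
    }

    int main() {
      uint32_t field = 5;                              // 3-bit field value
      uint32_t smi_word = field << (4 + kSmiTagSize);  // tagged bit field
      uint32_t decoded = Decode(smi_word, 4, 7u << 4);
      assert(decoded == field << kSmiTagSize);         // still smi-tagged
    }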
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
index c48bcc4..672ba0e 100644
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -43,44 +43,49 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
+ * - t7 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
* - t1 : Pointer to current code object (Code*) including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : points to tip of backtrack stack
+ * - t4 : Points to tip of backtrack stack
* - t5 : Unused.
* - t6 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - sp : points to tip of C stack.
+ * - sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
*
- * - fp[56] direct_call (if 1, direct call from JavaScript code,
+ * - fp[64] Isolate* isolate (address of the current isolate)
+ * - fp[60] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[52] stack_area_base (High end of the memory area to use as
+ * - fp[56] stack_area_base (High end of the memory area to use as
* backtracking stack).
+ * - fp[52] capture array size (may fit multiple sets of matches)
* - fp[48] int* capture_array (int[num_saved_registers_], for output).
* - fp[44] secondary link/return address used by native call.
* --- sp when called ---
- * - fp[40] return address (lr).
- * - fp[36] old frame pointer (r11).
+ * - fp[40] return address (lr).
+ * - fp[36] old frame pointer (r11).
* - fp[0..32] backup of registers s0..s7.
* --- frame pointer ----
- * - fp[-4] end of input (Address of end of string).
- * - fp[-8] start of input (Address of first character in string).
+ * - fp[-4] end of input (address of end of string).
+ * - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] Offset of location before start of input (effectively character
+ * - fp[-20] success counter (only for global regexps to count matches).
+ * - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
- * - fp[-24] At start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -114,8 +119,10 @@ namespace internal {
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
Mode mode,
- int registers_to_save)
- : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -201,8 +208,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kAtStart));
- BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+ __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
@@ -214,8 +221,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kAtStart));
- BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+ __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
__ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
@@ -444,13 +451,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
}
-void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
@@ -640,6 +640,7 @@ void RegExpMacroAssemblerMIPS::Fail() {
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+ Label return_v0;
if (masm_->has_exception()) {
// If the code gets corrupted due to long regular expressions and lack of
// space on trampolines, an internal exception flag is set. If this case
@@ -669,8 +670,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ mov(a0, zero_reg);
+ __ push(a0); // Make room for success counter and initialize it to 0.
__ push(a0); // Make room for "position - 1" constant (value irrelevant).
- __ push(a0); // Make room for "at start" constant (value irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -689,12 +691,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(v0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_v0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(a0);
// If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
__ bind(&stack_ok);
// Allocate space on stack for registers.
@@ -715,39 +717,44 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// position registers.
__ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
- // Determine whether the start index is zero, that is at the start of the
- // string, and store that value in a local variable.
- __ mov(t5, a1);
- __ li(a1, Operand(1));
- __ Movn(a1, zero_reg, t5);
- __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+ // Initialize the code pointer register.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ // Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1.
-
- // Address of register 0.
- __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
- __ li(a2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ sw(a0, MemOperand(a1));
- __ Addu(a1, a1, Operand(-kPointerSize));
- __ Subu(a2, a2, Operand(1));
- __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ sw(a0, MemOperand(a1));
+ __ Addu(a1, a1, Operand(-kPointerSize));
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ sw(a0, register_location(i));
+ }
+ }
}
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
- // Initialize code pointer register
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // Load previous char as initial value of current character register.
- Label at_start;
- __ lw(a0, MemOperand(frame_pointer(), kAtStart));
- __ Branch(&at_start, ne, a0, Operand(zero_reg));
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
- __ li(current_character(), Operand('\n'));
+
__ jmp(&start_label_);
@@ -776,6 +783,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ lw(a2, register_location(i));
__ lw(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in t7 for the zero-length check later.
+ __ mov(t7, a2);
+ }
if (mode_ == UC16) {
__ sra(a2, a2, 1);
__ Addu(a2, a2, a1);
@@ -791,10 +802,57 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(a0, a0, kPointerSize);
}
}
- __ li(v0, Operand(SUCCESS));
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Addu(a0, a0, 1);
+ __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Subu(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ mov(v0, a0);
+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+ __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Addu(a2, a2, num_saved_registers_ * kPointerSize);
+ __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t7: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(
+ &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ __ Addu(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(v0, Operand(SUCCESS));
+ }
}
// Exit and return v0.
__ bind(&exit_label_);
+ if (global()) {
+ __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_v0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -820,7 +878,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ MultiPop(regexp_registers_to_retain);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -864,7 +922,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ li(v0, Operand(EXCEPTION));
- __ jmp(&exit_label_);
+ __ jmp(&return_v0);
}
}
@@ -1012,8 +1070,9 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
}
-void RegExpMacroAssemblerMIPS::Succeed() {
+bool RegExpMacroAssemblerMIPS::Succeed() {
__ jmp(&success_label_);
+ return global();
}
@@ -1044,6 +1103,11 @@ void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
}
+bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
+ return false;
+}
+
+
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
@@ -1280,8 +1344,9 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
- __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
- offset = a0;
+ // t7 is not being used to store the capture start index at this point.
+ __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t7;
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.
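
Taken together, the GetCode changes in this file turn the stub from a single-shot matcher into a loop for global regexps: each success stores one capture set, decrements the remaining output slots, advances past zero-length matches, and jumps back to load_char_start_regexp; v0 ends up holding the number of successful captures instead of a bare SUCCESS code. A compact C++ model of that driver logic (the matcher pass itself is stubbed):

    #include <cstdio>

    struct Match { bool ok; int start; int end; };

    // Stand-in for one pass of the generated code: match "a+" in s at pos.
    static Match MatchOnce(const char* s, int pos) {
      int i = pos;
      while (s[i] == 'a') i++;
      return Match{i > pos, pos, i};
    }

    int main() {
      const char* subject = "aab";
      int output[8];                       // int* capture_array
      int remaining = 8;                   // kNumOutputRegisters
      const int per_match = 2;             // num_saved_registers_
      int pos = 0, successes = 0, out = 0;

      while (subject[pos] != '\0') {
        Match m = MatchOnce(subject, pos);
        if (!m.ok) break;
        output[out++] = m.start;           // store one capture set
        output[out++] = m.end;
        successes++;                       // kSuccessfulCaptures
        remaining -= per_match;
        if (remaining < per_match) break;  // no room for another set
        pos = (m.end == pos) ? pos + 1 : m.end;  // zero-length match guard
      }
      std::printf("matches: %d\n", successes);   // what v0 now returns
    }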
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
index d167f62..8dd52a4 100644
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.h
@@ -38,16 +38,10 @@
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
- public:
- RegExpMacroAssemblerMIPS();
- virtual ~RegExpMacroAssemblerMIPS();
-};
-#else // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerMIPS();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -72,7 +66,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
@@ -115,10 +108,11 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
@@ -141,7 +135,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -153,10 +148,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kInputString - kPointerSize;
- static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kAtStart - kPointerSize;
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.cc b/src/3rdparty/v8/src/mips/simulator-mips.cc
index f31ce7e..cf87f93 100644
--- a/src/3rdparty/v8/src/mips/simulator-mips.cc
+++ b/src/3rdparty/v8/src/mips/simulator-mips.cc
@@ -1502,10 +1502,15 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- double result = target(arg0, arg1, arg2, arg3);
if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
- SetFpResult(result);
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ double result = target(arg0, arg1, arg2, arg3);
+ SetFpResult(result);
} else {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ uint64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t gpreg_pair[2];
memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
set_register(v0, gpreg_pair[0]);
@@ -2063,10 +2068,15 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Rounding modes are not yet supported.
ASSERT((FCSR_ & 3) == 0);
// In rounding mode 0 it should behave like ROUND.
- case ROUND_W_D: // Round double to word.
+ case ROUND_W_D: // Round double to word (round half to even).
{
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded = floor(fs + 0.5);
int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
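
The ROUND_W_D fix above makes the simulator agree with the hardware's round-to-nearest, ties-to-even behaviour. The same computation in plain C++, exactly as the diff implements it (floor(x + 0.5), then step back when a tie landed on an odd integer):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    static int32_t RoundWD(double fs) {
      double rounded = std::floor(fs + 0.5);
      int32_t result = static_cast<int32_t>(rounded);
      if ((result & 1) != 0 && result - fs == 0.5) {
        result--;  // halfway case rounded to odd: pull back to even
      }
      return result;
    }

    int main() {
      assert(RoundWD(2.5) == 2);    // tie goes to the even neighbour
      assert(RoundWD(3.5) == 4);
      assert(RoundWD(2.4) == 2);    // non-ties are unaffected
      assert(RoundWD(-2.5) == -2);  // works for negative ties too
    }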
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.h b/src/3rdparty/v8/src/mips/simulator-mips.h
index 1e72939..776badc 100644
--- a/src/3rdparty/v8/src/mips/simulator-mips.h
+++ b/src/3rdparty/v8/src/mips/simulator-mips.h
@@ -50,16 +50,16 @@ namespace internal {
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
+ void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type mips_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -403,9 +403,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
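
The mips_regexp_matcher typedef and both CALL_GENERATED_REGEXP_CODE macros gain one int parameter: the number of output registers still available, so a single call can fill several capture sets for a global regexp. An annotated restatement of the new signature (type aliases and parameter meanings are inferred from the frame layout in regexp-macro-assembler-mips.h):

    typedef unsigned char byte;
    typedef void* Address;
    struct String;
    struct Isolate;

    typedef int (*mips_regexp_matcher)(String*,      // subject string
                                       int,          // start index
                                       const byte*,  // input start
                                       const byte*,  // input end
                                       void*,        // return-address dummy
                                       int*,         // capture output array
                                       int,          // NEW: output size left
                                       Address,      // backtrack stack base
                                       int,          // direct_call flag
                                       Isolate*);

    int main() { return 0; }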
diff --git a/src/3rdparty/v8/src/mips/stub-cache-mips.cc b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
index 18a5f5f..bd15775 100644
--- a/src/3rdparty/v8/src/mips/stub-cache-mips.cc
+++ b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
@@ -270,11 +270,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
__ lw(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ lw(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ lw(prototype,
@@ -291,13 +292,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ASSERT(!prototype.is(at));
- __ li(at, isolate->global());
+ __ li(at, isolate->global_object());
__ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -422,21 +424,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
// a0 : value.
Label exit;
+
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register; free name_reg by saving it on the stack.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -458,10 +498,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ li(t0, Operand(transition));
- __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ // Update the map of the object.
+ __ li(scratch1, Operand(transition));
+ __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -475,7 +525,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ sw(a0, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ JumpIfSmi(a0, &exit, scratch);
+ __ JumpIfSmi(a0, &exit, scratch1);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
@@ -483,15 +533,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
- __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(a0, FieldMemOperand(scratch, offset));
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sw(a0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(a0, &exit);
@@ -499,7 +550,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, a0);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -1185,6 +1236,44 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Branch(miss, ne, scratch2, Operand(callback));
+}
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1192,6 +1281,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1202,6 +1292,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Build the AccessorInfo::args_ list on the stack and push the property name
// below the exit frame so that the GC is aware of them, and store pointers
// to them.
__ push(receiver);
@@ -1269,12 +1364,13 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
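
The interceptor hunk above tightens the inlining predicate in two ways: field lookups now go through lookup->IsField(), and a callback is only inlined when it both has a getter and reports the receiver as compatible. A small stand-in for the decision (LookupResult and AccessorInfo are mocked):

    struct AccessorInfoSketch {
      void* getter;
      bool compatible_receiver;
      bool IsCompatibleReceiver(const void*) const {
        return compatible_receiver;
      }
    };

    static bool CompileFollowupInline(bool is_field,
                                      const AccessorInfoSketch* callback,
                                      const void* receiver) {
      if (is_field) return true;  // lookup->IsField(): always inlinable
      return callback != nullptr && callback->getter != nullptr &&
             callback->IsCompatibleReceiver(receiver);
    }

    int main() {
      AccessorInfoSketch cb{nullptr, true};
      return CompileFollowupInline(false, &cb, nullptr) ? 1 : 0;  // 0: no getter
    }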
@@ -1341,7 +1437,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), v0, holder_reg,
@@ -1481,7 +1577,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1585,16 +1681,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
+ __ CheckFastSmiElements(a3, t3, &call_builtin);
// receiver: receiver object
// a3: receiver map

- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ Label try_holey_map;
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a3,
t3,
+ &try_holey_map);
+ __ mov(a2, receiver);
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+ __ jmp(&fast_object);
+
+ __ bind(&try_holey_map);
+ __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ a3,
+ t3,
&call_builtin);
__ mov(a2, receiver);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
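The hunk above first attempts the packed smi-to-object map transition and falls back to the holey variant before giving up to the builtin. A minimal sketch of the same transitions as seen from script (the kind names are internal and not observable):

    // Sketch only: element-kind transitions performed by Array.prototype.push.
    var packed = [1, 2, 3];   // packed smi elements (FAST_SMI_ELEMENTS)
    packed.push({});          // transitions to FAST_ELEMENTS, then stores
    var holey = [1, , 3];     // holey smi elements (FAST_HOLEY_SMI_ELEMENTS)
    holey.push({});           // takes the holey path: FAST_HOLEY_ELEMENTS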
@@ -2015,7 +2124,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2149,7 +2258,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2250,7 +2359,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2471,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2530,7 +2639,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2547,21 +2656,30 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
Label miss;
// Name register might be clobbered.
- GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ a1, a2, a3, t0,
+ &miss);
__ bind(&miss);
__ li(a2, Operand(Handle<String>(name))); // Restore name.
Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2569,19 +2687,13 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- ra : return address
// -----------------------------------
Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(a1, a3, &miss);
- }
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a1, &miss, a3);
+ CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
// Stub never generated for non-global objects that require access
// checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(a1); // Receiver.
__ li(a3, Operand(callback)); // Callback info.
@@ -2599,7 +2711,81 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(a0);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(a1);
+ __ push(a0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(v0);
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
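The pop into v0 above implements JavaScript assignment semantics: the value of an assignment expression is the stored value, never the setter's return value. A minimal sketch:

    // Sketch only: why the stub returns the saved value register.
    var o = {};
    Object.defineProperty(o, "p", {
      set: function(v) { this.v_ = v; return "ignored"; }
    });
    var r = (o.p = 7);  // r === 7; the setter's return value is discarded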
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a1, &miss);
+ CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
+
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2644,7 +2830,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2689,7 +2875,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2723,7 +2909,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2745,7 +2931,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2760,13 +2946,76 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- ra : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+ GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(a0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
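For the load case the stub simply invokes the JavaScript getter with the receiver and no arguments once the map checks have passed. A minimal sketch of the kind of accessor it specializes:

    // Sketch only: a monomorphic load of o.p can be dispatched via this stub.
    var o = {};
    Object.defineProperty(o, "p", {
      get: function() { return this.v_; }
    });
    o.v_ = 1;
    o.p;  // invokes the getter; evaluates to 1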
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(a0, &miss);
+ CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
+
+ GenerateLoadViaGetter(masm(), getter);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2786,7 +3035,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2809,7 +3058,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2850,7 +3099,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2872,7 +3121,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2891,12 +3140,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
// Check the key is the cached one.
__ Branch(&miss, ne, a0, Operand(name));
- GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
- &miss);
+ GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
+ name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2920,7 +3169,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2945,7 +3194,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2965,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2990,7 +3239,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3014,7 +3263,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3034,7 +3283,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3061,7 +3310,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3086,7 +3335,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// a3 is used as scratch register. a1 and a2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ a2, a1, a3, t0,
+ &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
@@ -3094,7 +3349,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -3118,7 +3375,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3156,7 +3413,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3196,7 +3453,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// t7: undefined
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
+ a3, Operand(JS_FUNCTION_TYPE));
#endif
// Now allocate the JSObject in new space.
@@ -3204,7 +3461,13 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// a1: constructor function
// a2: initial map
// t7: undefined
+ ASSERT(function->has_initial_map());
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+#ifdef DEBUG
+ int instance_size = function->initial_map()->instance_size();
+ __ Check(eq, "Instance size of initial map changed.",
+ a3, Operand(instance_size >> kPointerSizeLog2));
+#endif
__ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3267,7 +3530,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3372,9 +3634,12 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3508,8 +3773,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3676,7 +3944,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ li(t0, 0x7ff);
__ Xor(t1, t5, Operand(0xFF));
__ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
- __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
+ __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
// Rebias exponent.
__ Addu(t5,
@@ -3869,8 +4137,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3934,8 +4205,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3970,7 +4244,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ xor_(t1, t6, t5);
__ li(t2, kBinary32ExponentMask);
__ Movz(t6, t2, t1); // Only if t6 is equal to t5.
- __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
+ __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
// Rebias exponent.
__ srl(t6, t6, HeapNumber::kExponentShift);
@@ -4001,7 +4275,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ bind(&done);
__ sll(t9, key, 1);
- __ addu(t9, a2, t9);
+ __ addu(t9, a3, t9);
__ sw(t3, MemOperand(t9, 0));
// Entry registers are intact, a0 holds the value which is the return
@@ -4019,7 +4293,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ or_(t3, t6, t4);
__ Branch(&done);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, t0, 3);
+ __ sll(t8, key, 2);
__ addu(t8, a3, t8);
// t8: effective address of destination element.
__ sw(t4, MemOperand(t8, 0));
@@ -4106,8 +4380,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4286,7 +4563,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@@ -4314,7 +4591,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4323,7 +4600,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
} else {
- ASSERT(elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastObjectElementsKind(elements_kind));
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4332,7 +4609,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
- ASSERT(elements_kind == FAST_ELEMENTS);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
@@ -4477,6 +4753,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
+ // All registers after this are overwritten.
elements_reg,
scratch1,
scratch2,
diff --git a/src/3rdparty/v8/src/mirror-debugger.js b/src/3rdparty/v8/src/mirror-debugger.js
index c7f0dcc..a5331a0 100644
--- a/src/3rdparty/v8/src/mirror-debugger.js
+++ b/src/3rdparty/v8/src/mirror-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -154,6 +154,7 @@ var FUNCTION_TYPE = 'function';
var REGEXP_TYPE = 'regexp';
var ERROR_TYPE = 'error';
var PROPERTY_TYPE = 'property';
+var INTERNAL_PROPERTY_TYPE = 'internalProperty';
var FRAME_TYPE = 'frame';
var SCRIPT_TYPE = 'script';
var CONTEXT_TYPE = 'context';
@@ -176,10 +177,8 @@ PropertyType.ConstantFunction = 2;
PropertyType.Callbacks = 3;
PropertyType.Handler = 4;
PropertyType.Interceptor = 5;
-PropertyType.MapTransition = 6;
-PropertyType.ExternalArrayTransition = 7;
-PropertyType.ConstantTransition = 8;
-PropertyType.NullDescriptor = 9;
+PropertyType.Transition = 6;
+PropertyType.Nonexistent = 7;
// Different attributes for a property.
@@ -214,6 +213,7 @@ var ScopeType = { Global: 0,
// - RegExpMirror
// - ErrorMirror
// - PropertyMirror
+// - InternalPropertyMirror
// - FrameMirror
// - ScriptMirror
@@ -360,6 +360,15 @@ Mirror.prototype.isProperty = function() {
/**
+ * Check whether the mirror reflects an internal property.
+ * @returns {boolean} True if the mirror reflects an internal property
+ */
+Mirror.prototype.isInternalProperty = function() {
+ return this instanceof InternalPropertyMirror;
+};
+
+
+/**
* Check whether the mirror reflects a stack frame.
* @returns {boolean} True if the mirror reflects a stack frame
*/
@@ -596,23 +605,6 @@ ObjectMirror.prototype.protoObject = function() {
};
-/**
- * Return the primitive value if this is object of Boolean, Number or String
- * type (but not Date). Otherwise return undefined.
- */
-ObjectMirror.prototype.primitiveValue = function() {
- if (!IS_STRING_WRAPPER(this.value_) && !IS_NUMBER_WRAPPER(this.value_) &&
- !IS_BOOLEAN_WRAPPER(this.value_)) {
- return void 0;
- }
- var primitiveValue = %_ValueOf(this.value_);
- if (IS_UNDEFINED(primitiveValue)) {
- return void 0;
- }
- return MakeMirror(primitiveValue);
-};
-
-
ObjectMirror.prototype.hasNamedInterceptor = function() {
// Get information on interceptors for this object.
var x = %GetInterceptorInfo(this.value_);
@@ -703,7 +695,7 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
* Return the properties for this object as an array of PropertyMirror objects.
* @param {number} kind Indicate whether named, indexed or both kinds of
* properties are requested
- * @param {number} limit Limit the number of properties returend to the
+ * @param {number} limit Limit the number of properties returned to the
 *     specified value
* @return {Array} Property mirrors for this object
*/
@@ -718,6 +710,16 @@ ObjectMirror.prototype.properties = function(kind, limit) {
};
+/**
+ * Return the internal properties for this object as an array of
+ * InternalPropertyMirror objects.
+ * @return {Array} Property mirrors for this object
+ */
+ObjectMirror.prototype.internalProperties = function() {
+ return ObjectMirror.GetInternalProperties(this.value_);
+};
+
+
ObjectMirror.prototype.property = function(name) {
var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
if (details) {
@@ -792,6 +794,37 @@ ObjectMirror.prototype.toText = function() {
/**
+ * Return the internal properties of the value, such as [[PrimitiveValue]] of
+ * scalar wrapper objects and the properties of a bound function.
+ * This method is static so that it can be used from the Debug API with bare
+ * values, without constructing mirrors.
+ * @return {Array} array (possibly empty) of InternalProperty instances
+ */
+ObjectMirror.GetInternalProperties = function(value) {
+ if (IS_STRING_WRAPPER(value) || IS_NUMBER_WRAPPER(value) ||
+ IS_BOOLEAN_WRAPPER(value)) {
+ var primitiveValue = %_ValueOf(value);
+ return [new InternalPropertyMirror("[[PrimitiveValue]]", primitiveValue)];
+ } else if (IS_FUNCTION(value)) {
+ var bindings = %BoundFunctionGetBindings(value);
+ var result = [];
+ if (bindings && IS_ARRAY(bindings)) {
+ result.push(new InternalPropertyMirror("[[TargetFunction]]",
+ bindings[0]));
+ result.push(new InternalPropertyMirror("[[BoundThis]]", bindings[1]));
+ var boundArgs = [];
+ for (var i = 2; i < bindings.length; i++) {
+ boundArgs.push(bindings[i]);
+ }
+ result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
+ }
+ return result;
+ }
+ return [];
+};
+
+
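A short sketch of the two cases handled above; the bracketed names are the symbolic names given to the InternalPropertyMirror instances, not real properties:

    // Sketch only: objects whose internal properties the mirror reports.
    var wrapper = new Number(42);            // [[PrimitiveValue]] -> 42
    function add(a, b) { return a + b; }
    var bound = add.bind({tag: "this"}, 1);
    // [[TargetFunction]] -> add, [[BoundThis]] -> {tag: "this"},
    // [[BoundArgs]]      -> [1]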
+/**
* Mirror object for functions.
* @param {function} value The function object reflected by this mirror.
* @constructor
@@ -1270,6 +1303,33 @@ PropertyMirror.prototype.isNative = function() {
};
+/**
+ * Mirror object for internal properties. An internal property reflects a
+ * property that is not accessible from user code, such as [[BoundThis]] of a
+ * bound function. Its name is merely symbolic.
+ * @param {string} name The name of the property
+ * @param {value} value The value of the property
+ * @constructor
+ * @extends Mirror
+ */
+function InternalPropertyMirror(name, value) {
+ %_CallFunction(this, INTERNAL_PROPERTY_TYPE, Mirror);
+ this.name_ = name;
+ this.value_ = value;
+}
+inherits(InternalPropertyMirror, Mirror);
+
+
+InternalPropertyMirror.prototype.name = function() {
+ return this.name_;
+};
+
+
+InternalPropertyMirror.prototype.value = function() {
+ return MakeMirror(this.value_, false);
+};
+
+
var kFrameDetailsFrameIdIndex = 0;
var kFrameDetailsReceiverIndex = 1;
var kFrameDetailsFunctionIndex = 2;
@@ -1750,6 +1810,15 @@ FrameMirror.prototype.localsText = function() {
};
+FrameMirror.prototype.restart = function() {
+ var result = %LiveEditRestartFrame(this.break_id_, this.index_);
+ if (IS_UNDEFINED(result)) {
+ result = "Failed to find requested frame";
+ }
+ return result;
+};
+
+
FrameMirror.prototype.toText = function(opt_locals) {
var result = '';
result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
@@ -2195,7 +2264,8 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
break;
case PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independeltly');
+ case INTERNAL_PROPERTY_TYPE:
+ throw new Error('PropertyMirror cannot be serialized independently');
break;
case FRAME_TYPE:
@@ -2271,7 +2341,8 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
* "prototypeObject":{"ref":<number>},
* "namedInterceptor":<boolean>,
* "indexedInterceptor":<boolean>,
- * "properties":[<properties>]}
+ * "properties":[<properties>],
+ * "internalProperties":[<internal properties>]}
*/
JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
details) {
@@ -2282,11 +2353,6 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
content.protoObject = this.serializeReference(mirror.protoObject());
content.prototypeObject = this.serializeReference(mirror.prototypeObject());
- var primitiveValue = mirror.primitiveValue();
- if (!IS_UNDEFINED(primitiveValue)) {
- content.primitiveValue = this.serializeReference(primitiveValue);
- }
-
// Add flags to indicate whether there are interceptors.
if (mirror.hasNamedInterceptor()) {
content.namedInterceptor = true;
@@ -2348,6 +2414,15 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
}
content.properties = p;
+
+ var internalProperties = mirror.internalProperties();
+ if (internalProperties.length > 0) {
+ var ip = [];
+ for (var i = 0; i < internalProperties.length; i++) {
+ ip.push(this.serializeInternalProperty_(internalProperties[i]));
+ }
+ content.internalProperties = ip;
+ }
};
@@ -2415,6 +2490,33 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
};
+/**
+ * Serialize internal property information to the following JSON format for
+ * building the array of properties.
+ *
+ * {"name":"<property name>",
+ * "ref":<number>}
+ *
+ * {"name":"[[BoundThis]]","ref":117}
+ *
+ * @param {InternalPropertyMirror} propertyMirror The property to serialize.
+ * @returns {Object} Protocol object representing the property.
+ */
+JSONProtocolSerializer.prototype.serializeInternalProperty_ =
+ function(propertyMirror) {
+ var result = {};
+
+ result.name = propertyMirror.name();
+ var propertyValue = propertyMirror.value();
+ if (this.inlineRefs_() && propertyValue.isValue()) {
+ result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+ } else {
+ result.ref = propertyValue.handle();
+ }
+ return result;
+};
+
+
JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.index = mirror.index();
content.receiver = this.serializeReference(mirror.receiver());
diff --git a/src/3rdparty/v8/src/misc-intrinsics.h b/src/3rdparty/v8/src/misc-intrinsics.h
index 5393de2..c1da8a9 100644
--- a/src/3rdparty/v8/src/misc-intrinsics.h
+++ b/src/3rdparty/v8/src/misc-intrinsics.h
@@ -45,7 +45,7 @@ inline int IntegerLog2(uint32_t value) {
return 31 - __builtin_clz(value);
}
-#elif defined(_MSC_VER)
+#elif defined(_MSC_VER) && !defined(_WIN32_WCE)
#pragma intrinsic(_BitScanReverse)
diff --git a/src/3rdparty/v8/src/mksnapshot.cc b/src/3rdparty/v8/src/mksnapshot.cc
index d1620bf..d777551 100644
--- a/src/3rdparty/v8/src/mksnapshot.cc
+++ b/src/3rdparty/v8/src/mksnapshot.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <errno.h>
+#include <stdio.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include <bzlib.h>
#endif
@@ -33,6 +35,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "flags.h"
#include "natives.h"
#include "platform.h"
#include "serialize.h"
@@ -163,30 +166,37 @@ class CppByteSink : public PartialSnapshotSink {
}
void WriteSpaceUsed(
+ const char* prefix,
int new_space_used,
int pointer_space_used,
int data_space_used,
int code_space_used,
int map_space_used,
- int cell_space_used,
- int large_space_used) {
- fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
+ int cell_space_used) {
fprintf(fp_,
- "const int Snapshot::pointer_space_used_ = %d;\n",
+ "const int Snapshot::%snew_space_used_ = %d;\n",
+ prefix,
+ new_space_used);
+ fprintf(fp_,
+ "const int Snapshot::%spointer_space_used_ = %d;\n",
+ prefix,
pointer_space_used);
fprintf(fp_,
- "const int Snapshot::data_space_used_ = %d;\n",
+ "const int Snapshot::%sdata_space_used_ = %d;\n",
+ prefix,
data_space_used);
fprintf(fp_,
- "const int Snapshot::code_space_used_ = %d;\n",
+ "const int Snapshot::%scode_space_used_ = %d;\n",
+ prefix,
code_space_used);
- fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
fprintf(fp_,
- "const int Snapshot::cell_space_used_ = %d;\n",
- cell_space_used);
+ "const int Snapshot::%smap_space_used_ = %d;\n",
+ prefix,
+ map_space_used);
fprintf(fp_,
- "const int Snapshot::large_space_used_ = %d;\n",
- large_space_used);
+ "const int Snapshot::%scell_space_used_ = %d;\n",
+ prefix,
+ cell_space_used);
}
void WritePartialSnapshot() {
@@ -303,7 +313,67 @@ int main(int argc, char** argv) {
#endif
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
- ASSERT(!context.IsEmpty());
+ if (context.IsEmpty()) {
+ fprintf(stderr,
+ "\nException thrown while compiling natives - see above.\n\n");
+ exit(1);
+ }
+ if (i::FLAG_extra_code != NULL) {
+ context->Enter();
+ // Capture 100 frames if anything happens.
+ V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
+ HandleScope scope;
+ const char* name = i::FLAG_extra_code;
+ FILE* file = i::OS::FOpen(name, "rb");
+ if (file == NULL) {
+ fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
+ exit(1);
+ }
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
+ if (read < 0) {
+ fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
+ exit(1);
+ }
+ i += read;
+ }
+ fclose(file);
+ Local<String> source = String::New(chars);
+ TryCatch try_catch;
+ Local<Script> script = Script::Compile(source);
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+ exit(1);
+ }
+ script->Run();
+ if (try_catch.HasCaught()) {
+ fprintf(stderr, "Failure running '%s'\n", name);
+ Local<Message> message = try_catch.Message();
+ Local<String> message_string = message->Get();
+ Local<String> message_line = message->GetSourceLine();
+ int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
+ char* buf = new char[len];
+ message_string->WriteUtf8(buf);
+ fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
+ message_line->WriteUtf8(buf);
+ fprintf(stderr, "%s\n", buf);
+ int from = message->GetStartColumn();
+ int to = message->GetEndColumn();
+ int i;
+ for (i = 0; i < from; i++) fprintf(stderr, " ");
+ for ( ; i <= to; i++) fprintf(stderr, "^");
+ fprintf(stderr, "\n");
+ exit(1);
+ }
+ context->Exit();
+ }
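The file read above is ordinary JavaScript whose side effects are baked into the snapshot; assuming the usual V8 flag mapping, i::FLAG_extra_code corresponds to a --extra_code command-line option. A hypothetical input file:

    // Sketch only: contents of a file passed to mksnapshot as extra code.
    function greet(name) {
      return "hello, " + name;
    }
    this.greet = greet;  // attach to the global object of the snapshot context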
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
@@ -337,12 +407,20 @@ int main(int argc, char** argv) {
sink.WritePartialSnapshot();
sink.WriteSpaceUsed(
+ "context_",
partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
- partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
- partial_ser.CurrentAllocationAddress(i::LO_SPACE));
+ partial_ser.CurrentAllocationAddress(i::CELL_SPACE));
+ sink.WriteSpaceUsed(
+ "",
+ ser.CurrentAllocationAddress(i::NEW_SPACE),
+ ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
+ ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
+ ser.CurrentAllocationAddress(i::CODE_SPACE),
+ ser.CurrentAllocationAddress(i::MAP_SPACE),
+ ser.CurrentAllocationAddress(i::CELL_SPACE));
return 0;
}
diff --git a/src/3rdparty/v8/src/object-observe.js b/src/3rdparty/v8/src/object-observe.js
new file mode 100644
index 0000000..28aa1f4
--- /dev/null
+++ b/src/3rdparty/v8/src/object-observe.js
@@ -0,0 +1,240 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var InternalObjectIsFrozen = $Object.isFrozen;
+var InternalObjectFreeze = $Object.freeze;
+
+var observationState = %GetObservationState();
+if (IS_UNDEFINED(observationState.observerInfoMap)) {
+ observationState.observerInfoMap = %CreateObjectHashTable();
+ observationState.objectInfoMap = %CreateObjectHashTable();
+ observationState.notifierTargetMap = %CreateObjectHashTable();
+ observationState.activeObservers = new InternalArray;
+ observationState.observerPriority = 0;
+}
+
+function InternalObjectHashTable(tableName) {
+ this.tableName = tableName;
+}
+
+InternalObjectHashTable.prototype = {
+ get: function(key) {
+ return %ObjectHashTableGet(observationState[this.tableName], key);
+ },
+ set: function(key, value) {
+ observationState[this.tableName] =
+ %ObjectHashTableSet(observationState[this.tableName], key, value);
+ },
+ has: function(key) {
+ return %ObjectHashTableHas(observationState[this.tableName], key);
+ }
+};
+
+var observerInfoMap = new InternalObjectHashTable('observerInfoMap');
+var objectInfoMap = new InternalObjectHashTable('objectInfoMap');
+var notifierTargetMap = new InternalObjectHashTable('notifierTargetMap');
+
+function CreateObjectInfo(object) {
+ var info = {
+ changeObservers: new InternalArray,
+ notifier: null,
+ };
+ objectInfoMap.set(object, info);
+ return info;
+}
+
+function ObjectObserve(object, callback) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["observe"]);
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["observe"]);
+ if (InternalObjectIsFrozen(callback))
+ throw MakeTypeError("observe_callback_frozen");
+
+ if (!observerInfoMap.has(callback)) {
+ observerInfoMap.set(callback, {
+ pendingChangeRecords: null,
+ priority: observationState.observerPriority++,
+ });
+ }
+
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo)) {
+ objectInfo = CreateObjectInfo(object);
+ %SetIsObserved(object, true);
+ }
+
+ var changeObservers = objectInfo.changeObservers;
+ if (changeObservers.indexOf(callback) >= 0)
+ return;
+
+ changeObservers.push(callback);
+}
+
+function ObjectUnobserve(object, callback) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["unobserve"]);
+
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo))
+ return;
+
+ var changeObservers = objectInfo.changeObservers;
+ var index = changeObservers.indexOf(callback);
+ if (index < 0)
+ return;
+
+ changeObservers.splice(index, 1);
+}
+
+function EnqueueChangeRecord(changeRecord, observers) {
+ for (var i = 0; i < observers.length; i++) {
+ var observer = observers[i];
+ var observerInfo = observerInfoMap.get(observer);
+ observationState.activeObservers[observerInfo.priority] = observer;
+ %SetObserverDeliveryPending();
+ if (IS_NULL(observerInfo.pendingChangeRecords)) {
+ observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
+ } else {
+ observerInfo.pendingChangeRecords.push(changeRecord);
+ }
+ }
+}
+
+function NotifyChange(type, object, name, oldValue) {
+ var objectInfo = objectInfoMap.get(object);
+ var changeRecord = (arguments.length < 4) ?
+ { type: type, object: object, name: name } :
+ { type: type, object: object, name: name, oldValue: oldValue };
+ InternalObjectFreeze(changeRecord);
+ EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+}
+
+var notifierPrototype = {};
+
+function ObjectNotifierNotify(changeRecord) {
+ if (!IS_SPEC_OBJECT(this))
+ throw MakeTypeError("called_on_non_object", ["notify"]);
+
+ var target = notifierTargetMap.get(this);
+ if (IS_UNDEFINED(target))
+ throw MakeTypeError("observe_notify_non_notifier");
+
+ if (!IS_STRING(changeRecord.type))
+ throw MakeTypeError("observe_type_non_string");
+
+ var objectInfo = objectInfoMap.get(target);
+ if (IS_UNDEFINED(objectInfo))
+ return;
+
+ if (!objectInfo.changeObservers.length)
+ return;
+
+ var newRecord = {
+ object: target
+ };
+ for (var prop in changeRecord) {
+ if (prop === 'object')
+ continue;
+ newRecord[prop] = changeRecord[prop];
+ }
+ InternalObjectFreeze(newRecord);
+
+ EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
+}
+
+function ObjectGetNotifier(object) {
+ if (!IS_SPEC_OBJECT(object))
+ throw MakeTypeError("observe_non_object", ["getNotifier"]);
+
+ if (InternalObjectIsFrozen(object))
+ return null;
+
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo))
+ objectInfo = CreateObjectInfo(object);
+
+ if (IS_NULL(objectInfo.notifier)) {
+ objectInfo.notifier = {
+ __proto__: notifierPrototype
+ };
+ notifierTargetMap.set(objectInfo.notifier, object);
+ }
+
+ return objectInfo.notifier;
+}
+
+function DeliverChangeRecordsForObserver(observer) {
+ var observerInfo = observerInfoMap.get(observer);
+ if (IS_UNDEFINED(observerInfo))
+ return;
+
+ var pendingChangeRecords = observerInfo.pendingChangeRecords;
+ if (IS_NULL(pendingChangeRecords))
+ return;
+
+ observerInfo.pendingChangeRecords = null;
+ var delivered = [];
+ %MoveArrayContents(pendingChangeRecords, delivered);
+ try {
+ %Call(void 0, delivered, observer);
+ } catch (ex) {}
+}
+
+function ObjectDeliverChangeRecords(callback) {
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
+
+ DeliverChangeRecordsForObserver(callback);
+}
+
+function DeliverChangeRecords() {
+ while (observationState.activeObservers.length) {
+ var activeObservers = observationState.activeObservers;
+ observationState.activeObservers = new InternalArray;
+ for (var i in activeObservers) {
+ DeliverChangeRecordsForObserver(activeObservers[i]);
+ }
+ }
+}
+
+function SetupObjectObserve() {
+ %CheckIsBootstrapping();
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "deliverChangeRecords", ObjectDeliverChangeRecords,
+ "getNotifier", ObjectGetNotifier,
+ "observe", ObjectObserve,
+ "unobserve", ObjectUnobserve
+ ));
+ InstallFunctions(notifierPrototype, DONT_ENUM, $Array(
+ "notify", ObjectNotifierNotify
+ ));
+}
+
+SetupObjectObserve();
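For reference, a minimal usage sketch of the experimental API this file installs; it follows only what the code above implements (later revisions of Object.observe changed delivery timing):

    // Sketch only: the surface installed by SetupObjectObserve.
    var subject = {};
    function observer(records) {
      // records is a frozen array of { type, object, name[, oldValue] }
    }
    Object.observe(subject, observer);
    var notifier = Object.getNotifier(subject);
    notifier.notify({ type: "updated", name: "p", oldValue: 1 });
    Object.deliverChangeRecords(observer);  // synchronous flush for observer
    Object.unobserve(subject, observer);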
diff --git a/src/3rdparty/v8/src/objects-debug.cc b/src/3rdparty/v8/src/objects-debug.cc
index 9006abd..c2f64d4 100644
--- a/src/3rdparty/v8/src/objects-debug.cc
+++ b/src/3rdparty/v8/src/objects-debug.cc
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void MaybeObject::Verify() {
Object* this_as_object;
@@ -55,18 +55,18 @@ void Object::VerifyPointer(Object* p) {
if (p->IsHeapObject()) {
HeapObject::VerifyHeapPointer(p);
} else {
- ASSERT(p->IsSmi());
+ CHECK(p->IsSmi());
}
}
void Smi::SmiVerify() {
- ASSERT(IsSmi());
+ CHECK(IsSmi());
}
void Failure::FailureVerify() {
- ASSERT(IsFailure());
+ CHECK(IsFailure());
}
@@ -207,68 +207,68 @@ void HeapObject::HeapObjectVerify() {
void HeapObject::VerifyHeapPointer(Object* p) {
- ASSERT(p->IsHeapObject());
- ASSERT(HEAP->Contains(HeapObject::cast(p)));
+ CHECK(p->IsHeapObject());
+ CHECK(HEAP->Contains(HeapObject::cast(p)));
}
void HeapNumber::HeapNumberVerify() {
- ASSERT(IsHeapNumber());
+ CHECK(IsHeapNumber());
}
void ByteArray::ByteArrayVerify() {
- ASSERT(IsByteArray());
+ CHECK(IsByteArray());
}
void FreeSpace::FreeSpaceVerify() {
- ASSERT(IsFreeSpace());
+ CHECK(IsFreeSpace());
}
void ExternalPixelArray::ExternalPixelArrayVerify() {
- ASSERT(IsExternalPixelArray());
+ CHECK(IsExternalPixelArray());
}
void ExternalByteArray::ExternalByteArrayVerify() {
- ASSERT(IsExternalByteArray());
+ CHECK(IsExternalByteArray());
}
void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
- ASSERT(IsExternalUnsignedByteArray());
+ CHECK(IsExternalUnsignedByteArray());
}
void ExternalShortArray::ExternalShortArrayVerify() {
- ASSERT(IsExternalShortArray());
+ CHECK(IsExternalShortArray());
}
void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
- ASSERT(IsExternalUnsignedShortArray());
+ CHECK(IsExternalUnsignedShortArray());
}
void ExternalIntArray::ExternalIntArrayVerify() {
- ASSERT(IsExternalIntArray());
+ CHECK(IsExternalIntArray());
}
void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
- ASSERT(IsExternalUnsignedIntArray());
+ CHECK(IsExternalUnsignedIntArray());
}
void ExternalFloatArray::ExternalFloatArrayVerify() {
- ASSERT(IsExternalFloatArray());
+ CHECK(IsExternalFloatArray());
}
void ExternalDoubleArray::ExternalDoubleArrayVerify() {
- ASSERT(IsExternalDoubleArray());
+ CHECK(IsExternalDoubleArray());
}
@@ -277,8 +277,8 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(elements());
if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
- ASSERT(this->elements()->IsFixedArray());
- ASSERT(this->elements()->length() >= 2);
+ CHECK(this->elements()->IsFixedArray());
+ CHECK_GE(this->elements()->length(), 2);
}
if (HasFastProperties()) {
@@ -286,35 +286,41 @@ void JSObject::JSObjectVerify() {
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ((map()->has_fast_elements() ||
- map()->has_fast_smi_only_elements() ||
+ CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT(map()->has_fast_elements() == HasFastElements());
+ CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
}
void Map::MapVerify() {
- ASSERT(!HEAP->InNewSpace(this));
- ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- ASSERT(instance_size() == kVariableSizeSentinel ||
+ CHECK(!HEAP->InNewSpace(this));
+ CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
+ CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
+ DescriptorArray* descriptors = instance_descriptors();
+ for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
+ CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
+ }
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
- SLOW_ASSERT(instance_descriptors()->IsConsistentWithBackPointers(this));
+ if (HasTransitionArray()) {
+ SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
+ SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
+ }
}
void Map::SharedMapVerify() {
MapVerify();
- ASSERT(is_shared());
- ASSERT(instance_descriptors()->IsEmpty());
- ASSERT_EQ(0, pre_allocated_property_fields());
- ASSERT_EQ(0, unused_property_fields());
- ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
+ CHECK(is_shared());
+ CHECK(instance_descriptors()->IsEmpty());
+ CHECK_EQ(0, pre_allocated_property_fields());
+ CHECK_EQ(0, unused_property_fields());
+ CHECK_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
visitor_id());
}
@@ -322,21 +328,21 @@ void Map::SharedMapVerify() {
void CodeCache::CodeCacheVerify() {
VerifyHeapPointer(default_cache());
VerifyHeapPointer(normal_type_cache());
- ASSERT(default_cache()->IsFixedArray());
- ASSERT(normal_type_cache()->IsUndefined()
+ CHECK(default_cache()->IsFixedArray());
+ CHECK(normal_type_cache()->IsUndefined()
|| normal_type_cache()->IsCodeCacheHashTable());
}
void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
VerifyHeapPointer(cache());
- ASSERT(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
+ CHECK(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
}
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
- VerifyObjectField(kIcTotalCountOffset);
- VerifyObjectField(kIcWithTypeinfoCountOffset);
+ VerifyObjectField(kStorage1Offset);
+ VerifyObjectField(kStorage2Offset);
VerifyHeapPointer(type_feedback_cells());
}
@@ -362,7 +368,7 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
double value = get_scalar(i);
- ASSERT(!isnan(value) ||
+ CHECK(!isnan(value) ||
(BitCast<uint64_t>(value) ==
BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
@@ -372,11 +378,10 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
void JSModule::JSModuleVerify() {
- Object* v = context();
- if (v->IsHeapObject()) {
- VerifyHeapPointer(v);
- }
- CHECK(v->IsUndefined() || v->IsModuleContext());
+ VerifyObjectField(kContextOffset);
+ VerifyObjectField(kScopeInfoOffset);
+ CHECK(context()->IsUndefined() ||
+ Context::cast(context())->IsModuleContext());
}
@@ -458,10 +463,17 @@ void String::StringVerify() {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
+ } else if (IsSeqAsciiString()) {
+ SeqAsciiString::cast(this)->SeqAsciiStringVerify();
}
}
+void SeqAsciiString::SeqAsciiStringVerify() {
+ CHECK(String::IsAscii(GetChars(), length()));
+}
+
+
void ConsString::ConsStringVerify() {
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
@@ -487,7 +499,8 @@ void JSFunction::JSFunctionVerify() {
VerifyObjectField(kPrototypeOrInitialMapOffset);
VerifyObjectField(kNextFunctionLinkOffset);
CHECK(code()->IsCode());
- CHECK(next_function_link()->IsUndefined() ||
+ CHECK(next_function_link() == NULL ||
+ next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
}
@@ -496,6 +509,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
CHECK(IsSharedFunctionInfo());
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
+ VerifyObjectField(kOptimizedCodeMapOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kFunctionDataOffset);
@@ -507,10 +521,10 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
void JSGlobalProxy::JSGlobalProxyVerify() {
CHECK(IsJSGlobalProxy());
JSObjectVerify();
- VerifyObjectField(JSGlobalProxy::kContextOffset);
+ VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK(HasFastElements());
+ CHECK(HasFastObjectElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -542,14 +556,14 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- ASSERT(number == HEAP->nan_value());
+ CHECK(number == HEAP->nan_value());
} else {
- ASSERT(number->IsSmi());
+ CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
// Hidden oddballs have negative smis.
const int kLeastHiddenOddballNumber = -4;
- ASSERT(value <= 1);
- ASSERT(value >= kLeastHiddenOddballNumber);
+ CHECK_LE(value, 1);
+ CHECK(value >= kLeastHiddenOddballNumber);
}
}
@@ -578,8 +592,8 @@ void Code::CodeVerify() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
- ASSERT(length()->IsNumber() || length()->IsUndefined());
- ASSERT(elements()->IsUndefined() ||
+ CHECK(length()->IsNumber() || length()->IsUndefined());
+ CHECK(elements()->IsUndefined() ||
elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
}
@@ -589,7 +603,7 @@ void JSSet::JSSetVerify() {
CHECK(IsJSSet());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
@@ -597,7 +611,7 @@ void JSMap::JSMapVerify() {
CHECK(IsJSMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
@@ -605,17 +619,17 @@ void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined());
}
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
- ASSERT(data()->IsUndefined() || data()->IsFixedArray());
+ CHECK(data()->IsUndefined() || data()->IsFixedArray());
switch (TypeTag()) {
case JSRegExp::ATOM: {
FixedArray* arr = FixedArray::cast(data());
- ASSERT(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
+ CHECK(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
break;
}
case JSRegExp::IRREGEXP: {
@@ -626,26 +640,26 @@ void JSRegExp::JSRegExpVerify() {
// Smi : Not compiled yet (-1) or code prepared for flushing.
// JSObject: Compilation error.
// Code/ByteArray: Compiled code.
- ASSERT(ascii_data->IsSmi() ||
+ CHECK(ascii_data->IsSmi() ||
(is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- ASSERT(uc16_data->IsSmi() ||
+ CHECK(uc16_data->IsSmi() ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
- ASSERT(ascii_saved->IsSmi() || ascii_saved->IsString() ||
+ CHECK(ascii_saved->IsSmi() || ascii_saved->IsString() ||
ascii_saved->IsCode());
Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
- ASSERT(uc16_saved->IsSmi() || uc16_saved->IsString() ||
+ CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
uc16_saved->IsCode());
- ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
- ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
+ CHECK(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
+ CHECK(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
break;
}
default:
- ASSERT_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- ASSERT(data()->IsUndefined());
+ CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
+ CHECK(data()->IsUndefined());
break;
}
}
@@ -654,7 +668,7 @@ void JSRegExp::JSRegExpVerify() {
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
VerifyPointer(handler());
- ASSERT(hash()->IsSmi() || hash()->IsUndefined());
+ CHECK(hash()->IsSmi() || hash()->IsUndefined());
}
@@ -667,7 +681,7 @@ void JSFunctionProxy::JSFunctionProxyVerify() {
void Foreign::ForeignVerify() {
- ASSERT(IsForeign());
+ CHECK(IsForeign());
}
@@ -678,6 +692,7 @@ void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
VerifyPointer(data());
VerifyPointer(flag());
+ VerifyPointer(expected_receiver_type());
}
@@ -770,6 +785,47 @@ void Script::ScriptVerify() {
}
+void JSFunctionResultCache::JSFunctionResultCacheVerify() {
+ JSFunction::cast(get(kFactoryIndex))->Verify();
+
+ int size = Smi::cast(get(kCacheSizeIndex))->value();
+ CHECK(kEntriesIndex <= size);
+ CHECK(size <= length());
+ CHECK_EQ(0, size % kEntrySize);
+
+ int finger = Smi::cast(get(kFingerIndex))->value();
+ CHECK(kEntriesIndex <= finger);
+ CHECK((finger < size) || (finger == kEntriesIndex && finger == size));
+ CHECK_EQ(0, finger % kEntrySize);
+
+ if (FLAG_enable_slow_asserts) {
+ for (int i = kEntriesIndex; i < size; i++) {
+ CHECK(!get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ for (int i = size; i < length(); i++) {
+ CHECK(get(i)->IsTheHole());
+ get(i)->Verify();
+ }
+ }
+}
+
+
+void NormalizedMapCache::NormalizedMapCacheVerify() {
+ FixedArray::cast(this)->Verify();
+ if (FLAG_enable_slow_asserts) {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ if (e->IsMap()) {
+ Map::cast(e)->SharedMapVerify();
+ } else {
+ CHECK(e->IsUndefined());
+ }
+ }
+ }
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
@@ -788,7 +844,9 @@ void BreakPointInfo::BreakPointInfoVerify() {
VerifyPointer(break_point_objects());
}
#endif // ENABLE_DEBUGGER_SUPPORT
+#endif // VERIFY_HEAP
+#ifdef DEBUG
void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_objects_++;
@@ -805,6 +863,11 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
// Indexed properties
switch (GetElementsKind()) {
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
@@ -818,6 +881,14 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_unused_elements_ += holes;
break;
}
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
ExternalPixelArray* e = ExternalPixelArray::cast(elements());
@@ -831,8 +902,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
break;
}
}
@@ -875,17 +945,18 @@ void JSObject::SpillInformation::Print() {
}
-bool DescriptorArray::IsSortedNoDuplicates() {
+bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
+ if (valid_entries == -1) valid_entries = number_of_descriptors();
String* current_key = NULL;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
- String* key = GetKey(i);
+ String* key = GetSortedKey(i);
if (key == current_key) {
PrintDescriptors();
return false;
}
current_key = key;
- uint32_t hash = GetKey(i)->Hash();
+ uint32_t hash = GetSortedKey(i)->Hash();
if (hash < current) {
PrintDescriptors();
return false;
@@ -896,121 +967,42 @@ bool DescriptorArray::IsSortedNoDuplicates() {
}
-static bool CheckOneBackPointer(Map* current_map, Object* target) {
- return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
-}
-
-
-bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
- for (int i = 0; i < number_of_descriptors(); ++i) {
- switch (GetType(i)) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- if (!CheckOneBackPointer(current_map, GetValue(i))) {
- return false;
- }
- break;
- case ELEMENTS_TRANSITION: {
- Object* object = GetValue(i);
- if (!CheckOneBackPointer(current_map, object)) {
- return false;
- }
- if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); ++i) {
- if (!CheckOneBackPointer(current_map, array->get(i))) {
- return false;
- }
- }
- }
- break;
- }
- case CALLBACKS: {
- Object* object = GetValue(i);
- if (object->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(object);
- if (!CheckOneBackPointer(current_map, accessors->getter())) {
- return false;
- }
- if (!CheckOneBackPointer(current_map, accessors->setter())) {
- return false;
- }
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- break;
+bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
+ ASSERT(valid_entries == -1);
+ String* current_key = NULL;
+ uint32_t current = 0;
+ for (int i = 0; i < number_of_transitions(); i++) {
+ String* key = GetSortedKey(i);
+ if (key == current_key) {
+ PrintTransitions();
+ return false;
+ }
+ current_key = key;
+ uint32_t hash = GetSortedKey(i)->Hash();
+ if (hash < current) {
+ PrintTransitions();
+ return false;
}
+ current = hash;
}
return true;
}
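+
+// As in DescriptorArray::IsSortedNoDuplicates above, the invariant checked
+// here is that keys are ordered by string hash with no duplicates; a repeated
+// key or a decreasing hash prints the offending array and fails the check.
+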
-void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->Verify();
-
- int size = Smi::cast(get(kCacheSizeIndex))->value();
- ASSERT(kEntriesIndex <= size);
- ASSERT(size <= length());
- ASSERT_EQ(0, size % kEntrySize);
-
- int finger = Smi::cast(get(kFingerIndex))->value();
- ASSERT(kEntriesIndex <= finger);
- ASSERT((finger < size) || (finger == kEntriesIndex && finger == size));
- ASSERT_EQ(0, finger % kEntrySize);
-
- if (FLAG_enable_slow_asserts) {
- for (int i = kEntriesIndex; i < size; i++) {
- ASSERT(!get(i)->IsTheHole());
- get(i)->Verify();
- }
- for (int i = size; i < length(); i++) {
- ASSERT(get(i)->IsTheHole());
- get(i)->Verify();
- }
- }
+static bool CheckOneBackPointer(Map* current_map, Object* target) {
+ return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
}
-void NormalizedMapCache::NormalizedMapCacheVerify() {
- FixedArray::cast(this)->Verify();
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < length(); i++) {
- Object* e = get(i);
- if (e->IsMap()) {
- Map::cast(e)->SharedMapVerify();
- } else {
- ASSERT(e->IsUndefined());
- }
- }
+bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
+ if (HasElementsTransition() &&
+ !CheckOneBackPointer(current_map, elements_transition())) {
+ return false;
}
-}
-
-
-void Map::ZapInstanceDescriptors() {
- DescriptorArray* descriptors = instance_descriptors();
- if (descriptors == GetHeap()->empty_descriptor_array()) return;
- FixedArray* contents = FixedArray::cast(
- descriptors->get(DescriptorArray::kContentArrayIndex));
- MemsetPointer(descriptors->data_start(),
- GetHeap()->the_hole_value(),
- descriptors->length());
- MemsetPointer(contents->data_start(),
- GetHeap()->the_hole_value(),
- contents->length());
-}
-
-
-void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = prototype_transitions();
- MemsetPointer(proto_transitions->data_start(),
- GetHeap()->the_hole_value(),
- proto_transitions->length());
+ for (int i = 0; i < number_of_transitions(); ++i) {
+ if (!CheckOneBackPointer(current_map, GetTarget(i))) return false;
+ }
+ return true;
}
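+
+// Every transition target, including the elements transition, must name this
+// map as its back pointer. CheckOneBackPointer deliberately tolerates
+// non-map values, so cleared slots do not trip verification.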
diff --git a/src/3rdparty/v8/src/objects-inl.h b/src/3rdparty/v8/src/objects-inl.h
index 74e63f2..b45b4d0 100644
--- a/src/3rdparty/v8/src/objects-inl.h
+++ b/src/3rdparty/v8/src/objects-inl.h
@@ -47,6 +47,7 @@
#include "v8memory.h"
#include "factory.h"
#include "incremental-marking.h"
+#include "transitions-inl.h"
namespace v8 {
namespace internal {
@@ -128,18 +129,6 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind) {
- if (to_kind == FAST_ELEMENTS) {
- return from_kind == FAST_SMI_ONLY_ELEMENTS ||
- from_kind == FAST_DOUBLE_ELEMENTS;
- } else {
- return to_kind == FAST_DOUBLE_ELEMENTS &&
- from_kind == FAST_SMI_ONLY_ELEMENTS;
- }
-}
-
-
bool Object::IsFixedArrayBase() {
return IsFixedArray() || IsFixedDoubleArray();
}
@@ -300,7 +289,7 @@ bool StringShape::IsSymbol() {
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag;
+ return (type & kStringEncodingMask) == kOneByteStringTag;
}
@@ -316,7 +305,7 @@ bool String::IsAsciiRepresentationUnderneath() {
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kAsciiStringTag:
+ case kOneByteStringTag:
return true;
case kTwoByteStringTag:
return false;
@@ -332,7 +321,7 @@ bool String::IsTwoByteRepresentationUnderneath() {
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kAsciiStringTag:
+ case kOneByteStringTag:
return false;
case kTwoByteStringTag:
return true;
@@ -344,7 +333,7 @@ bool String::IsTwoByteRepresentationUnderneath() {
bool String::HasOnlyAsciiChars() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kAsciiStringTag ||
+ return (type & kStringEncodingMask) == kOneByteStringTag ||
(type & kAsciiDataHintMask) == kAsciiDataHintTag;
}
@@ -393,9 +382,12 @@ uint32_t StringShape::full_representation_tag() {
STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
+STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
+ Internals::kStringEncodingMask);
+
bool StringShape::IsSequentialAscii() {
- return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
+ return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
@@ -405,10 +397,16 @@ bool StringShape::IsSequentialTwoByte() {
bool StringShape::IsExternalAscii() {
- return full_representation_tag() == (kExternalStringTag | kAsciiStringTag);
+ return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
+STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
+ Internals::kExternalAsciiRepresentationTag);
+
+STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
+
+
bool StringShape::IsExternalTwoByte() {
return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}
@@ -417,6 +415,7 @@ bool StringShape::IsExternalTwoByte() {
STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
Internals::kExternalTwoByteRepresentationTag);
+STATIC_CHECK(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
ASSERT(0 <= index && index <= length_);
@@ -536,6 +535,11 @@ bool Object::IsDescriptorArray() {
}
+bool Object::IsTransitionArray() {
+ return IsFixedArray();
+}
+
+
bool Object::IsDeoptimizationInputData() {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -574,31 +578,23 @@ bool Object::IsTypeFeedbackCells() {
bool Object::IsContext() {
- if (Object::IsHeapObject()) {
- Map* map = HeapObject::cast(this)->map();
- Heap* heap = map->GetHeap();
- return (map == heap->function_context_map() ||
- map == heap->catch_context_map() ||
- map == heap->with_context_map() ||
- map == heap->global_context_map() ||
- map == heap->block_context_map() ||
- map == heap->module_context_map());
- }
- return false;
-}
-
-
-bool Object::IsGlobalContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->global_context_map();
+ if (!Object::IsHeapObject()) return false;
+ Map* map = HeapObject::cast(this)->map();
+ Heap* heap = map->GetHeap();
+ return (map == heap->function_context_map() ||
+ map == heap->catch_context_map() ||
+ map == heap->with_context_map() ||
+ map == heap->native_context_map() ||
+ map == heap->block_context_map() ||
+ map == heap->module_context_map() ||
+ map == heap->global_context_map());
}
-bool Object::IsModuleContext() {
+bool Object::IsNativeContext() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->module_context_map();
+ HeapObject::cast(this)->GetHeap()->native_context_map();
}
@@ -664,8 +660,8 @@ bool Object::IsDictionary() {
bool Object::IsSymbolTable() {
- return IsHashTable() && this ==
- HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
+ return IsHashTable() &&
+ this == HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
}
@@ -678,7 +674,7 @@ bool Object::IsJSFunctionResultCache() {
% JSFunctionResultCache::kEntrySize != 0) {
return false;
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<JSFunctionResultCache*>(this)->
JSFunctionResultCacheVerify();
@@ -693,7 +689,7 @@ bool Object::IsNormalizedMapCache() {
if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
return false;
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
}
@@ -722,6 +718,11 @@ bool Object::IsMapCache() {
}
+bool Object::IsObjectHashTable() {
+ return IsHashTable();
+}
+
+
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -1114,13 +1115,13 @@ HeapObject* MapWord::ToForwardingAddress() {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
- ASSERT(READ_FIELD(this, offset)->IsSmi());
+ CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif
@@ -1244,35 +1245,26 @@ FixedArrayBase* JSObject::elements() {
return static_cast<FixedArrayBase*>(array);
}
-void JSObject::ValidateSmiOnlyElements() {
+
+void JSObject::ValidateElements() {
#if DEBUG
- if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
- Heap* heap = GetHeap();
- // Don't use elements, since integrity checks will fail if there
- // are filler pointers in the array.
- FixedArray* fixed_array =
- reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- // Arrays that have been shifted in place can't be verified.
- if (map != heap->raw_unchecked_one_pointer_filler_map() &&
- map != heap->raw_unchecked_two_pointer_filler_map() &&
- map != heap->free_space_map()) {
- for (int i = 0; i < fixed_array->length(); i++) {
- Object* current = fixed_array->get(i);
- ASSERT(current->IsSmi() || current->IsTheHole());
- }
- }
+ if (FLAG_enable_slow_asserts) {
+ ElementsAccessor* accessor = GetElementsAccessor();
+ accessor->Validate(this);
}
#endif
}
MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
-#if DEBUG
- ValidateSmiOnlyElements();
-#endif
- if ((map()->elements_kind() != FAST_ELEMENTS)) {
- return TransitionElementsKind(FAST_ELEMENTS);
+ ValidateElements();
+ ElementsKind elements_kind = map()->elements_kind();
+ if (!IsFastObjectElementsKind(elements_kind)) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
+ } else {
+ return TransitionElementsKind(FAST_ELEMENTS);
+ }
}
return this;
}
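+
+// The fast elements kinds now form a lattice: smi -> double -> object along
+// one axis, packed -> holey along the other. Transitions only generalize, so
+// an array that has ever been holey stays holey; that is why the holey branch
+// above takes precedence over the plain FAST_ELEMENTS transition.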
@@ -1284,20 +1276,29 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
ElementsKind current_kind = map()->elements_kind();
ElementsKind target_kind = current_kind;
ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (current_kind == FAST_ELEMENTS) return this;
-
+ bool is_holey = IsFastHoleyElementsKind(current_kind);
+ if (current_kind == FAST_HOLEY_ELEMENTS) return this;
Heap* heap = GetHeap();
Object* the_hole = heap->the_hole_value();
- Object* heap_number_map = heap->heap_number_map();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
- if (!current->IsSmi() && current != the_hole) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
- HeapObject::cast(current)->map() == heap_number_map) {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ if (current == the_hole) {
+ is_holey = true;
+ target_kind = GetHoleyElementsKind(target_kind);
+ } else if (!current->IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsFastSmiElementsKind(target_kind)) {
+ if (is_holey) {
+ target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ }
+ }
+ } else if (is_holey) {
+ target_kind = FAST_HOLEY_ELEMENTS;
+ break;
} else {
target_kind = FAST_ELEMENTS;
- break;
}
}
}
@@ -1310,6 +1311,7 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
+ uint32_t length,
EnsureElementsMode mode) {
if (elements->map() != GetHeap()->fixed_double_array_map()) {
ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
@@ -1318,11 +1320,19 @@ MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, elements->length(), mode);
+ return EnsureCanContainElements(objects, length, mode);
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+ if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
+ return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
+ for (uint32_t i = 0; i < length; ++i) {
+ if (double_array->is_the_hole(i)) {
+ return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ }
+ }
return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
}
@@ -1334,21 +1344,20 @@ MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
ElementsKind to_kind) {
Map* current_map = map();
ElementsKind from_kind = current_map->elements_kind();
-
if (from_kind == to_kind) return current_map;
- Context* global_context = isolate->context()->global_context();
- if (current_map == global_context->smi_js_array_map()) {
- if (to_kind == FAST_ELEMENTS) {
- return global_context->object_js_array_map();
- } else {
- if (to_kind == FAST_DOUBLE_ELEMENTS) {
- return global_context->double_js_array_map();
- } else {
- ASSERT(to_kind == DICTIONARY_ELEMENTS);
+ Context* native_context = isolate->context()->native_context();
+ Object* maybe_array_maps = native_context->js_array_maps();
+ if (maybe_array_maps->IsFixedArray()) {
+ FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
+ if (array_maps->get(from_kind) == current_map) {
+ Object* maybe_transitioned_map = array_maps->get(to_kind);
+ if (maybe_transitioned_map->IsMap()) {
+ return Map::cast(maybe_transitioned_map);
}
}
}
+
return GetElementsTransitionMapSlow(to_kind);
}
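+
+// Fast path: the native context caches one JSArray map per elements kind in
+// js_array_maps(), so a transition between two cached kinds is an indexed
+// lookup. Non-array maps and uncached kinds fall through to
+// GetElementsTransitionMapSlow.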
@@ -1357,9 +1366,6 @@ void JSObject::set_map_and_elements(Map* new_map,
FixedArrayBase* value,
WriteBarrierMode mode) {
ASSERT(value->HasValidElements());
-#ifdef DEBUG
- ValidateSmiOnlyElements();
-#endif
if (new_map != NULL) {
if (mode == UPDATE_WRITE_BARRIER) {
set_map(new_map);
@@ -1368,8 +1374,7 @@ void JSObject::set_map_and_elements(Map* new_map,
set_map_no_write_barrier(new_map);
}
}
- ASSERT((map()->has_fast_elements() ||
- map()->has_fast_smi_only_elements() ||
+ ASSERT((map()->has_fast_smi_or_object_elements() ||
(value == GetHeap()->empty_fixed_array())) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
@@ -1392,8 +1397,7 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_elements() ||
- map()->has_fast_smi_only_elements() ||
+ ASSERT(map()->has_fast_smi_or_object_elements() ||
map()->has_fast_double_elements());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
@@ -1402,9 +1406,10 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
Object* obj;
- ElementsKind elements_kind = FLAG_smi_only_arrays
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
+ ElementsKind elements_kind = GetInitialFastElementsKind();
+ if (!FLAG_smi_only_arrays) {
+ elements_kind = FastSmiToObjectElementsKind(elements_kind);
+ }
MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
elements_kind);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1414,6 +1419,43 @@ MaybeObject* JSObject::ResetElements() {
}
+MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
+ ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
+ map->NumberOfOwnDescriptors());
+ if (this->map()->unused_property_fields() == 0) {
+ int new_size = properties()->length() + map->unused_property_fields() + 1;
+ FixedArray* new_properties;
+ MaybeObject* maybe_properties = properties()->CopySize(new_size);
+ if (!maybe_properties->To(&new_properties)) return maybe_properties;
+ set_properties(new_properties);
+ }
+ set_map(map);
+ return this;
+}
+
+
+bool JSObject::TryTransitionToField(Handle<JSObject> object,
+ Handle<String> key) {
+ if (!object->map()->HasTransitionArray()) return false;
+ Handle<TransitionArray> transitions(object->map()->transitions());
+ int transition = transitions->Search(*key);
+ if (transition == TransitionArray::kNotFound) return false;
+ PropertyDetails target_details = transitions->GetTargetDetails(transition);
+ if (target_details.type() != FIELD) return false;
+ if (target_details.attributes() != NONE) return false;
+ Handle<Map> target(transitions->GetTarget(transition));
+ JSObject::AddFastPropertyUsingMap(object, target);
+ return true;
+}
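+
+// A store can only reuse an existing transition when it is a plain FIELD
+// addition with default (NONE) attributes; accessors, constants, and
+// non-default attributes still take the generic property-addition path.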
+
+
+int JSObject::LastAddedFieldIndex() {
+ Map* map = this->map();
+ int last_added = map->LastAdded();
+ return map->instance_descriptors()->GetFieldIndex(last_added);
+}
+
+
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -1627,17 +1669,27 @@ void JSObject::InitializeBody(Map* map,
bool JSObject::HasFastProperties() {
+ ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
return !properties()->IsDictionary();
}
-int JSObject::MaxFastProperties() {
+bool JSObject::TooManyFastProperties(int properties,
+ JSObject::StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
- // kMaxFastProperties in-object properties. When this is the case,
+ // kFastPropertiesSoftLimit in-object properties. When this is the case,
// it is very unlikely that the object is being used as a dictionary
// and there is a good chance that allowing more map transitions
// will be worth it.
- return Max(map()->inobject_properties(), kMaxFastProperties);
+ int inobject = map()->inobject_properties();
+
+ int limit;
+ if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
+ limit = Max(inobject, kMaxFastProperties);
+ } else {
+ limit = Max(inobject, kFastPropertiesSoftLimit);
+ }
+ return properties > limit;
}
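+
+// Two limits replace the old single kMaxFastProperties cutoff: stores that
+// are certainly not keyed keep the hard limit, while possibly-keyed stores
+// hit the lower kFastPropertiesSoftLimit, since an ever-growing set of keyed
+// property names strongly suggests dictionary-style usage.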
@@ -1681,6 +1733,23 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
+
+void Object::VerifyApiCallResultType() {
+#if ENABLE_EXTRA_CHECKS
+ if (!(IsSmi() ||
+ IsString() ||
+ IsSpecObject() ||
+ IsHeapNumber() ||
+ IsUndefined() ||
+ IsTrue() ||
+ IsFalse() ||
+ IsNull())) {
+ FATAL("API call returned invalid object");
+ }
+#endif // ENABLE_EXTRA_CHECKS
+}
+
+
FixedArrayBase* FixedArrayBase::cast(Object* object) {
ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
return reinterpret_cast<FixedArrayBase*>(object);
@@ -1693,6 +1762,11 @@ Object* FixedArray::get(int index) {
}
+bool FixedArray::is_the_hole(int index) {
+ return get(index) == GetHeap()->the_hole_value();
+}
+
+
void FixedArray::set(int index, Smi* value) {
ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
@@ -1798,7 +1872,7 @@ void FixedArray::set(int index,
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
@@ -1812,7 +1886,7 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
@@ -1874,7 +1948,7 @@ void FixedArray::set_unchecked(Heap* heap,
void FixedArray::set_null_unchecked(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(heap->null_value()));
+ ASSERT(!heap->InNewSpace(heap->null_value()));
WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
}
@@ -1885,76 +1959,198 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
- ASSERT(this->IsSmi() ||
- this->length() > kFirstIndex ||
+ ASSERT(length() >= kFirstIndex ||
this == HEAP->empty_descriptor_array());
- return this->IsSmi() || length() <= kFirstIndex;
+ return length() < kFirstIndex;
}
-int DescriptorArray::bit_field3_storage() {
- Object* storage = READ_FIELD(this, kBitField3StorageOffset);
- return Smi::cast(storage)->value();
+void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
+ WRITE_FIELD(
+ this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
}
-void DescriptorArray::set_bit_field3_storage(int value) {
- ASSERT(!IsEmpty());
- WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
+
+// Perform a binary search in a fixed array. Low and high are inclusive entry
+// indices: with three entries, call with low=0 and high=2.
+template<SearchMode search_mode, typename T>
+int BinarySearch(T* array, String* name, int low, int high, int valid_entries) {
+ uint32_t hash = name->Hash();
+ int limit = high;
+
+ ASSERT(low <= high);
+
+ while (low != high) {
+ int mid = (low + high) / 2;
+ String* mid_name = array->GetSortedKey(mid);
+ uint32_t mid_hash = mid_name->Hash();
+
+ if (mid_hash >= hash) {
+ high = mid;
+ } else {
+ low = mid + 1;
+ }
+ }
+
+ for (; low <= limit; ++low) {
+ int sort_index = array->GetSortedKeyIndex(low);
+ String* entry = array->GetKey(sort_index);
+ if (entry->Hash() != hash) break;
+ if (entry->Equals(name)) {
+ if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
+ return sort_index;
+ }
+ return T::kNotFound;
+ }
+ }
+
+ return T::kNotFound;
}
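+
+// A minimal standalone sketch of the lookup strategy above, assuming only
+// standard headers (<vector>, <string>, <stdint.h>); the container and names
+// are illustrative, not V8 API. Find the lower bound of the hash, then scan
+// the run of equal hashes for a key match:
+//
+//   int Lookup(const std::vector<std::pair<uint32_t, std::string> >& a,
+//              const std::string& name, uint32_t hash) {
+//     size_t lo = 0, hi = a.size();
+//     while (lo != hi) {  // lower bound on hash
+//       size_t mid = (lo + hi) / 2;
+//       if (a[mid].first >= hash) hi = mid; else lo = mid + 1;
+//     }
+//     for (; lo < a.size() && a[lo].first == hash; ++lo) {
+//       if (a[lo].second == name) return static_cast<int>(lo);
+//     }
+//     return -1;  // kNotFound
+//   }
+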
-void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
- int first,
- int second) {
- Object* tmp = array->get(first);
- NoIncrementalWriteBarrierSet(array, first, array->get(second));
- NoIncrementalWriteBarrierSet(array, second, tmp);
+// Perform a linear search in this fixed array. len is the total number of
+// entries. ALL_ENTRIES scans all of them in hash order; VALID_ENTRIES
+// compares only the first valid_entries keys in storage order.
+template<SearchMode search_mode, typename T>
+int LinearSearch(T* array, String* name, int len, int valid_entries) {
+ uint32_t hash = name->Hash();
+ if (search_mode == ALL_ENTRIES) {
+ for (int number = 0; number < len; number++) {
+ int sorted_index = array->GetSortedKeyIndex(number);
+ String* entry = array->GetKey(sorted_index);
+ uint32_t current_hash = entry->Hash();
+ if (current_hash > hash) break;
+ if (current_hash == hash && entry->Equals(name)) return sorted_index;
+ }
+ } else {
+ ASSERT(len >= valid_entries);
+ for (int number = 0; number < valid_entries; number++) {
+ String* entry = array->GetKey(number);
+ uint32_t current_hash = entry->Hash();
+ if (current_hash == hash && entry->Equals(name)) return number;
+ }
+ }
+ return T::kNotFound;
}
-int DescriptorArray::Search(String* name) {
- SLOW_ASSERT(IsSortedNoDuplicates());
+template<SearchMode search_mode, typename T>
+int Search(T* array, String* name, int valid_entries) {
+ if (search_mode == VALID_ENTRIES) {
+ SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
+ } else {
+ SLOW_ASSERT(array->IsSortedNoDuplicates());
+ }
- // Check for empty descriptor array.
- int nof = number_of_descriptors();
- if (nof == 0) return kNotFound;
+ int nof = array->number_of_entries();
+ if (nof == 0) return T::kNotFound;
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
- if (StringShape(name).IsSymbol() && nof < kMaxElementsForLinearSearch) {
- return LinearSearch(name, nof);
+ if ((search_mode == ALL_ENTRIES &&
+ nof <= kMaxElementsForLinearSearch) ||
+ (search_mode == VALID_ENTRIES &&
+ valid_entries <= (kMaxElementsForLinearSearch * 3))) {
+ return LinearSearch<search_mode>(array, name, nof, valid_entries);
}
// Slow case: perform binary search.
- return BinarySearch(name, 0, nof - 1);
+ return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries);
+}
+
+
+int DescriptorArray::Search(String* name, int valid_descriptors) {
+ return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
}
-int DescriptorArray::SearchWithCache(String* name) {
- int number = GetIsolate()->descriptor_lookup_cache()->Lookup(this, name);
+int DescriptorArray::SearchWithCache(String* name, Map* map) {
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) return kNotFound;
+
+ DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+ int number = cache->Lookup(map, name);
+
if (number == DescriptorLookupCache::kAbsent) {
- number = Search(name);
- GetIsolate()->descriptor_lookup_cache()->Update(this, name, number);
+ number = Search(name, number_of_own_descriptors);
+ cache->Update(map, name, number);
}
+
return number;
}
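+
+// The descriptor lookup cache is now keyed on (map, name) rather than on the
+// descriptor array: arrays can be shared between maps, and only the map knows
+// how many of its own descriptors are valid for the search.
+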
+void Map::LookupDescriptor(JSObject* holder,
+ String* name,
+ LookupResult* result) {
+ DescriptorArray* descriptors = this->instance_descriptors();
+ int number = descriptors->SearchWithCache(name, this);
+ if (number == DescriptorArray::kNotFound) return result->NotFound();
+ result->DescriptorResult(holder, descriptors->GetDetails(number), number);
+}
+
+
+void Map::LookupTransition(JSObject* holder,
+ String* name,
+ LookupResult* result) {
+ if (HasTransitionArray()) {
+ TransitionArray* transition_array = transitions();
+ int number = transition_array->Search(name);
+ if (number != TransitionArray::kNotFound) {
+ return result->TransitionResult(holder, number);
+ }
+ }
+ result->NotFound();
+}
+
+
+Object** DescriptorArray::GetKeySlot(int descriptor_number) {
+ ASSERT(descriptor_number < number_of_descriptors());
+ return HeapObject::RawField(
+ reinterpret_cast<HeapObject*>(this),
+ OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+}
+
+
String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number)));
}
+int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
+ return GetDetails(descriptor_number).pointer();
+}
+
+
+String* DescriptorArray::GetSortedKey(int descriptor_number) {
+ return GetKey(GetSortedKeyIndex(descriptor_number));
+}
+
+
+void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
+ PropertyDetails details = GetDetails(descriptor_index);
+ set(ToDetailsIndex(descriptor_index), details.set_pointer(pointer).AsSmi());
+}
+
+
+Object** DescriptorArray::GetValueSlot(int descriptor_number) {
+ ASSERT(descriptor_number < number_of_descriptors());
+ return HeapObject::RawField(
+ reinterpret_cast<HeapObject*>(this),
+ OffsetOfElementAt(ToValueIndex(descriptor_number)));
+}
+
+
Object* DescriptorArray::GetValue(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return GetContentArray()->get(ToValueIndex(descriptor_number));
+ return get(ToValueIndex(descriptor_number));
}
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- Object* details = GetContentArray()->get(ToDetailsIndex(descriptor_number));
+ Object* details = get(ToDetailsIndex(descriptor_number));
return PropertyDetails(Smi::cast(details));
}
@@ -1987,42 +2183,6 @@ AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
}
-bool DescriptorArray::IsProperty(int descriptor_number) {
- Entry entry(this, descriptor_number);
- return IsPropertyDescriptor(&entry);
-}
-
-
-bool DescriptorArray::IsTransitionOnly(int descriptor_number) {
- switch (GetType(descriptor_number)) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
- return true;
- case CALLBACKS: {
- Object* value = GetValue(descriptor_number);
- if (!value->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value);
- return accessors->getter()->IsMap() && accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
-bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
- return GetType(descriptor_number) == NULL_DESCRIPTOR;
-}
-
-
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
desc->Init(GetKey(descriptor_number),
GetValue(descriptor_number),
@@ -2035,40 +2195,89 @@ void DescriptorArray::Set(int descriptor_number,
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() <=
+ number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() > 0);
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
- FixedArray* content_array = GetContentArray();
- NoIncrementalWriteBarrierSet(content_array,
+ NoIncrementalWriteBarrierSet(this,
ToValueIndex(descriptor_number),
desc->GetValue());
- NoIncrementalWriteBarrierSet(content_array,
+ NoIncrementalWriteBarrierSet(this,
ToDetailsIndex(descriptor_number),
desc->GetDetails().AsSmi());
}
-void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
- int first, int second) {
- NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
- FixedArray* content_array = GetContentArray();
- NoIncrementalWriteBarrierSwap(content_array,
- ToValueIndex(first),
- ToValueIndex(second));
- NoIncrementalWriteBarrierSwap(content_array,
- ToDetailsIndex(first),
- ToDetailsIndex(second));
+void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+ // Range check.
+ ASSERT(descriptor_number < number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() <=
+ number_of_descriptors());
+ ASSERT(desc->GetDetails().descriptor_index() > 0);
+
+ set(ToKeyIndex(descriptor_number), desc->GetKey());
+ set(ToValueIndex(descriptor_number), desc->GetValue());
+ set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}
-DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
+void DescriptorArray::Append(Descriptor* desc,
+ const WhitenessWitness& witness) {
+ int descriptor_number = number_of_descriptors();
+ int enumeration_index = descriptor_number + 1;
+ SetNumberOfDescriptors(descriptor_number + 1);
+ desc->SetEnumerationIndex(enumeration_index);
+ Set(descriptor_number, desc, witness);
+
+ uint32_t hash = desc->GetKey()->Hash();
+
+ int insertion;
+
+ for (insertion = descriptor_number; insertion > 0; --insertion) {
+ String* key = GetSortedKey(insertion - 1);
+ if (key->Hash() <= hash) break;
+ SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
+ }
+
+ SetSortedKey(insertion, descriptor_number);
+}
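+
+// Append stores the new descriptor at the end (its enumeration index is its
+// position plus one) and then repairs the hash-sorted view by insertion:
+// sorted-key pointers with a larger hash shift up one slot and the new
+// descriptor's index drops into the gap.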
+
+
+void DescriptorArray::Append(Descriptor* desc) {
+ int descriptor_number = number_of_descriptors();
+ int enumeration_index = descriptor_number + 1;
+ SetNumberOfDescriptors(descriptor_number + 1);
+ desc->SetEnumerationIndex(enumeration_index);
+ Set(descriptor_number, desc);
+
+ uint32_t hash = desc->GetKey()->Hash();
+
+ int insertion;
+
+ for (insertion = descriptor_number; insertion > 0; --insertion) {
+ String* key = GetSortedKey(insertion - 1);
+ if (key->Hash() <= hash) break;
+ SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
+ }
+
+ SetSortedKey(insertion, descriptor_number);
+}
+
+
+void DescriptorArray::SwapSortedKeys(int first, int second) {
+ int first_key = GetSortedKeyIndex(first);
+ SetSortedKey(first, GetSortedKeyIndex(second));
+ SetSortedKey(second, first_key);
+}
+
+
+DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- if (array->number_of_descriptors() > 0) {
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
- ASSERT(Marking::Color(array->GetContentArray()) == Marking::WHITE_OBJECT);
- }
+ ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
}
@@ -2103,7 +2312,8 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
- // Empty entry.
+    // Empty entry. Uses raw unchecked accessors because this path is taken
+    // for the symbol table while it is being bootstrapped.
if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
@@ -2258,18 +2468,18 @@ String* String::TryFlattenGetString(PretenureFlag pretenure) {
uint16_t String::Get(int index) {
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kAsciiStringTag:
+ case kSeqStringTag | kOneByteStringTag:
return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kAsciiStringTag:
+ case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kAsciiStringTag:
+ case kExternalStringTag | kOneByteStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- case kSlicedStringTag | kAsciiStringTag:
+ case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
default:
@@ -2368,9 +2578,10 @@ String* SlicedString::parent() {
}
-void SlicedString::set_parent(String* parent) {
+void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
ASSERT(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}
@@ -2874,16 +3085,12 @@ bool Map::has_non_instance_prototype() {
void Map::set_function_with_prototype(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
- }
+ set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
}
bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
+ return FunctionWithPrototype::decode(bit_field3());
}
@@ -2915,28 +3122,21 @@ bool Map::is_extensible() {
void Map::set_attached_to_shared_function_info(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kAttachedToSharedFunctionInfo));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kAttachedToSharedFunctionInfo));
- }
+ set_bit_field3(AttachedToSharedFunctionInfo::update(bit_field3(), value));
}
bool Map::attached_to_shared_function_info() {
- return ((1 << kAttachedToSharedFunctionInfo) & bit_field3()) != 0;
+ return AttachedToSharedFunctionInfo::decode(bit_field3());
}
void Map::set_is_shared(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kIsShared));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kIsShared));
- }
+ set_bit_field3(IsShared::update(bit_field3(), value));
}
+
bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field3()) != 0;
+ return IsShared::decode(bit_field3());
}
void Map::set_has_external_resource(bool value) {
@@ -2968,15 +3168,21 @@ bool Map::use_user_object_comparison() {
void Map::set_named_interceptor_is_fallback(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kNamedInterceptorIsFallback));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kNamedInterceptorIsFallback));
- }
+ set_bit_field3(NamedInterceptorIsFallback::update(bit_field3(), value));
}
bool Map::named_interceptor_is_fallback() {
- return ((1 << kNamedInterceptorIsFallback) & bit_field3()) != 0;
+ return NamedInterceptorIsFallback::decode(bit_field3());
+}
+
+
+void Map::set_dictionary_map(bool value) {
+ set_bit_field3(DictionaryMap::update(bit_field3(), value));
+}
+
+
+bool Map::is_dictionary_map() {
+ return DictionaryMap::decode(bit_field3());
}
@@ -2990,6 +3196,26 @@ Code::Flags Code::flags() {
}
+void Map::set_owns_descriptors(bool is_shared) {
+ set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
+}
+
+
+bool Map::owns_descriptors() {
+ return OwnsDescriptors::decode(bit_field3());
+}
+
+
+void Map::set_is_observed(bool is_observed) {
+ set_bit_field3(IsObserved::update(bit_field3(), is_observed));
+}
+
+
+bool Map::is_observed() {
+ return IsObserved::decode(bit_field3());
+}
+
+
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
@@ -3024,7 +3250,7 @@ Code::ExtraICState Code::extra_ic_state() {
}
-PropertyType Code::type() {
+Code::StubType Code::type() {
return ExtractTypeFromFlags(flags());
}
@@ -3041,7 +3267,8 @@ int Code::major_key() {
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == TO_BOOLEAN_IC);
- return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
+ return StubMajorKeyField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@@ -3052,7 +3279,9 @@ void Code::set_major_key(int major) {
kind() == COMPARE_IC ||
kind() == TO_BOOLEAN_IC);
ASSERT(0 <= major && major < 256);
- WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = StubMajorKeyField::update(previous, major);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
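+
+// Here and in the hunks below, per-stub byte fields (major key, stack slots,
+// IC states, and so on) are folded into the 32-bit kKindSpecificFlags words.
+// A minimal sketch of the read-modify-write pattern, in plain C++ rather than
+// V8's actual BitField template:
+//
+//   template <int kShift, int kSize>
+//   struct Field {
+//     static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
+//     static uint32_t decode(uint32_t word) {
+//       return (word & kMask) >> kShift;
+//     }
+//     static uint32_t update(uint32_t word, uint32_t value) {
+//       return (word & ~kMask) | ((value << kShift) & kMask);
+//     }
+//   };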
@@ -3154,39 +3383,50 @@ void Code::set_profiler_ticks(int ticks) {
unsigned Code::stack_slots() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kStackSlotsOffset);
+ return StackSlotsField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_stack_slots(unsigned slots) {
+ CHECK(slots <= (1 << kStackSlotsBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = StackSlotsField::update(previous, slots);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+ return SafepointTableOffsetField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_safepoint_table_offset(unsigned offset) {
+ CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = SafepointTableOffsetField::update(previous, offset);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
unsigned Code::stack_check_table_offset() {
ASSERT_EQ(FUNCTION, kind());
- return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
+ return StackCheckTableOffsetField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_stack_check_table_offset(unsigned offset) {
ASSERT_EQ(FUNCTION, kind());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = StackCheckTableOffsetField::update(previous, offset);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
@@ -3205,85 +3445,106 @@ void Code::set_check_type(CheckType value) {
byte Code::unary_op_type() {
ASSERT(is_unary_op_stub());
- return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
+ return UnaryOpTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_unary_op_type(byte value) {
ASSERT(is_unary_op_stub());
- WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = UnaryOpTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::binary_op_type() {
ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+ return BinaryOpTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_binary_op_type(byte value) {
ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::binary_op_result_type() {
ASSERT(is_binary_op_stub());
- return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
+ return BinaryOpResultTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_binary_op_result_type(byte value) {
ASSERT(is_binary_op_stub());
- WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpResultTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::compare_state() {
ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareStateOffset);
+ return CompareStateField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_compare_state(byte value) {
ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareStateField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::compare_operation() {
ASSERT(is_compare_ic_stub());
- return READ_BYTE_FIELD(this, kCompareOperationOffset);
+ return CompareOperationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_compare_operation(byte value) {
ASSERT(is_compare_ic_stub());
- WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareOperationField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
- return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
+ return ToBooleanStateField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_to_boolean_state(byte value) {
ASSERT(is_to_boolean_ic_stub());
- WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = ToBooleanStateField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
bool Code::has_function_cache() {
ASSERT(kind() == STUB);
- return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+ return HasFunctionCacheField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_has_function_cache(bool flag) {
ASSERT(kind() == STUB);
- WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = HasFunctionCacheField::update(previous, flag);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
@@ -3296,7 +3557,7 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
- PropertyType type,
+ StubType type,
int argc,
InlineCacheHolderFlag holder) {
// Extra IC state is only allowed for call IC stubs or for store IC
@@ -3317,7 +3578,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- PropertyType type,
+ StubType type,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
int argc) {
@@ -3340,7 +3601,7 @@ Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
-PropertyType Code::ExtractTypeFromFlags(Flags flags) {
+Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
@@ -3390,150 +3651,222 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
}
-DescriptorArray* Map::instance_descriptors() {
- Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- return GetHeap()->empty_descriptor_array();
+// If the map has no transition array, or only a simple transition, install a
+// full transition array with room for an elements transition.
+static MaybeObject* EnsureHasTransitionArray(Map* map) {
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions;
+ if (!map->HasTransitionArray()) {
+ maybe_transitions = TransitionArray::Allocate(0);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ transitions->set_back_pointer_storage(map->GetBackPointer());
+ } else if (!map->transitions()->IsFullTransitionArray()) {
+ maybe_transitions = map->transitions()->ExtendToFullTransitionArray();
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
} else {
- return DescriptorArray::cast(object);
+ return map;
}
+ map->set_transitions(transitions);
+ return transitions;
}
-void Map::init_instance_descriptors() {
- WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
-}
-
-
-void Map::clear_instance_descriptors() {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (!object->IsSmi()) {
+void Map::InitializeDescriptors(DescriptorArray* descriptors) {
+ int len = descriptors->number_of_descriptors();
#ifdef DEBUG
- ZapInstanceDescriptors();
-#endif
- WRITE_FIELD(
- this,
- kInstanceDescriptorsOrBitField3Offset,
- Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
+ ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
+
+ bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
+ for (int i = 0; i < len; ++i) used_indices[i] = false;
+
+ // Ensure that all enumeration indexes between 1 and length occur uniquely in
+ // the descriptor array.
+ for (int i = 0; i < len; ++i) {
+ int enum_index = descriptors->GetDetails(i).descriptor_index() -
+ PropertyDetails::kInitialIndex;
+ ASSERT(0 <= enum_index && enum_index < len);
+ ASSERT(!used_indices[enum_index]);
+ used_indices[enum_index] = true;
}
+#endif
+
+ set_instance_descriptors(descriptors);
+ SetNumberOfOwnDescriptors(len);
}
-void Map::set_instance_descriptors(DescriptorArray* value,
- WriteBarrierMode mode) {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- Heap* heap = GetHeap();
- if (value == heap->empty_descriptor_array()) {
- clear_instance_descriptors();
- return;
- } else {
- if (object->IsSmi()) {
- value->set_bit_field3_storage(Smi::cast(object)->value());
- } else {
- value->set_bit_field3_storage(
- DescriptorArray::cast(object)->bit_field3_storage());
- }
- }
- ASSERT(!is_shared());
-#ifdef DEBUG
- if (value != instance_descriptors()) {
- ZapInstanceDescriptors();
- }
-#endif
- WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
-}
+ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
+SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
-int Map::bit_field3() {
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- return Smi::cast(object)->value();
- } else {
- return DescriptorArray::cast(object)->bit_field3_storage();
+void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
+ Object* back_pointer = GetBackPointer();
+
+ if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
+ ZapTransitions();
}
+
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, back_pointer);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kTransitionsOrBackPointerOffset, back_pointer, mode);
}
-void Map::set_bit_field3(int value) {
- ASSERT(Smi::IsValid(value));
- Object* object = READ_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset);
- if (object->IsSmi()) {
- WRITE_FIELD(this,
- kInstanceDescriptorsOrBitField3Offset,
- Smi::FromInt(value));
- } else {
- DescriptorArray::cast(object)->set_bit_field3_storage(value);
- }
+void Map::AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness& witness) {
+ DescriptorArray* descriptors = instance_descriptors();
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ descriptors->Append(desc, witness);
+ SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
}
Object* Map::GetBackPointer() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ if (object->IsDescriptorArray()) {
+ return TransitionArray::cast(object)->back_pointer_storage();
} else {
+ ASSERT(object->IsMap() || object->IsUndefined());
return object;
}
}
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- FixedArray::cast(object)->set(
- kProtoTransitionBackPointerOffset, value, mode);
- } else {
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
- }
+bool Map::HasElementsTransition() {
+ return HasTransitionArray() && transitions()->HasElementsTransition();
}
-FixedArray* Map::prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- if (object->IsFixedArray()) {
- return FixedArray::cast(object);
- } else {
+bool Map::HasTransitionArray() {
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ return object->IsTransitionArray();
+}
+
+
+Map* Map::elements_transition_map() {
+ return transitions()->elements_transition();
+}
+
+
+bool Map::CanHaveMoreTransitions() {
+ if (!HasTransitionArray()) return true;
+ return FixedArray::SizeFor(transitions()->length() +
+ TransitionArray::kTransitionSize)
+ <= Page::kMaxNonCodeHeapObjectSize;
+}
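+
+// Growing the transition array by one more entry must not push it past the
+// largest object a regular (non-code) page can hold; past that point the map
+// simply stops accepting new transitions.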
+
+
+MaybeObject* Map::AddTransition(String* key,
+ Map* target,
+ SimpleTransitionFlag flag) {
+ if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
+ return TransitionArray::NewWith(flag, key, target, GetBackPointer());
+}
+
+
+void Map::SetTransition(int transition_index, Map* target) {
+ transitions()->SetTarget(transition_index, target);
+}
+
+
+Map* Map::GetTransition(int transition_index) {
+ return transitions()->GetTarget(transition_index);
+}
+
+
+MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
+ MaybeObject* allow_elements = EnsureHasTransitionArray(this);
+ if (allow_elements->IsFailure()) return allow_elements;
+ transitions()->set_elements_transition(transitioned_map);
+ return this;
+}
+
+
+FixedArray* Map::GetPrototypeTransitions() {
+ if (!HasTransitionArray()) return GetHeap()->empty_fixed_array();
+ if (!transitions()->HasPrototypeTransitions()) {
return GetHeap()->empty_fixed_array();
}
+ return transitions()->GetPrototypeTransitions();
}
-void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- ASSERT(value != heap->empty_fixed_array());
- value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
+MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
+ MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
+ if (allow_prototype->IsFailure()) return allow_prototype;
#ifdef DEBUG
- if (value != prototype_transitions()) {
+ if (HasPrototypeTransitions()) {
+ ASSERT(GetPrototypeTransitions() != proto_transitions);
ZapPrototypeTransitions();
}
#endif
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+ transitions()->SetPrototypeTransitions(proto_transitions);
+ return this;
+}
+
+
+bool Map::HasPrototypeTransitions() {
+ return HasTransitionArray() && transitions()->HasPrototypeTransitions();
+}
+
+
+TransitionArray* Map::transitions() {
+ ASSERT(HasTransitionArray());
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ return TransitionArray::cast(object);
+}
+
+
+void Map::set_transitions(TransitionArray* transition_array,
+ WriteBarrierMode mode) {
+ // In release mode, only run this code if verify_heap is on.
+ if (Heap::ShouldZapGarbage() && HasTransitionArray()) {
+ CHECK(transitions() != transition_array);
+ ZapTransitions();
+ }
+
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, transition_array);
CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+ GetHeap(), this, kTransitionsOrBackPointerOffset, transition_array, mode);
}
-void Map::init_prototype_transitions(Object* undefined) {
+void Map::init_back_pointer(Object* undefined) {
ASSERT(undefined->IsUndefined());
- WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
+}
+
+
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+ ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+ ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+ (value->IsMap() && GetBackPointer()->IsUndefined()));
+ Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
+ if (object->IsTransitionArray()) {
+ TransitionArray::cast(object)->set_back_pointer_storage(value);
+ } else {
+ WRITE_FIELD(this, kTransitionsOrBackPointerOffset, value);
+ CONDITIONAL_WRITE_BARRIER(
+ GetHeap(), this, kTransitionsOrBackPointerOffset, value, mode);
+ }
+}
+
+
+// Can be a Smi (no transitions), a normal transition array, or a transition
+// array whose header has been temporarily overwritten with a Smi (as happens
+// while the array is being iterated).
+TransitionArray* Map::unchecked_transition_array() {
+ Object* object = *HeapObject::RawField(this,
+ Map::kTransitionsOrBackPointerOffset);
+ TransitionArray* transition_array = static_cast<TransitionArray*>(object);
+ return transition_array;
}
-HeapObject* Map::unchecked_prototype_transitions() {
- Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
- return reinterpret_cast<HeapObject*>(object);
+HeapObject* Map::UncheckedPrototypeTransitions() {
+ ASSERT(HasTransitionArray());
+ ASSERT(unchecked_transition_array()->HasPrototypeTransitions());
+ return unchecked_transition_array()->UncheckedPrototypeTransitions();
}
@@ -3542,22 +3875,22 @@ ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction,
- next_function_link,
- Object,
- kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
+ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
-ACCESSORS(JSGlobalProxy, context, Object, kContextOffset)
+ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
+ACCESSORS(AccessorInfo, expected_receiver_type, Object,
+ kExpectedReceiverTypeOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -3606,7 +3939,7 @@ ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
kInternalFieldCountOffset)
ACCESSORS(ObjectTemplateInfo, has_external_resource, Object,
kHasExternalResourceOffset)
-ACCESSORS(ObjectTemplateInfo, use_user_object_comparison, Object,
+ACCESSORS(ObjectTemplateInfo, use_user_object_comparison, Object,
kUseUserObjectComparisonOffset)
ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
@@ -3643,6 +3976,8 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
+ kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
@@ -3653,7 +3988,7 @@ ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -3677,6 +4012,10 @@ BOOL_ACCESSORS(SharedFunctionInfo,
kAllowLazyCompilation)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
+ allows_lazy_compilation_without_context,
+ kAllowLazyCompilationWithoutContext)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
uses_arguments,
kUsesArguments)
BOOL_ACCESSORS(SharedFunctionInfo,
@@ -3702,8 +4041,10 @@ SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
+SMI_ACCESSORS(SharedFunctionInfo,
+ stress_deopt_counter,
+ kStressDeoptCounterOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -3755,8 +4096,10 @@ PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
kThisPropertyAssignmentsCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, counters, kCountersOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
+ stress_deopt_counter,
+ kStressDeoptCounterOffset)
#endif
@@ -3851,6 +4194,18 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
kDontOptimize)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
+
+void SharedFunctionInfo::BeforeVisitingPointers() {
+ if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
+
+ // Flush optimized code map on major GC.
+  // Note: we may experiment with rebuilding it, or with retaining entries
+  // that should survive, since we iterate through all optimized functions
+  // anyway.
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -3959,14 +4314,66 @@ void SharedFunctionInfo::set_code_age(int code_age) {
}
+int SharedFunctionInfo::ic_age() {
+ return ICAgeBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_ic_age(int ic_age) {
+ set_counters(ICAgeBits::update(counters(), ic_age));
+}
+
+
+int SharedFunctionInfo::deopt_count() {
+ return DeoptCountBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_deopt_count(int deopt_count) {
+ set_counters(DeoptCountBits::update(counters(), deopt_count));
+}
+
+
+void SharedFunctionInfo::increment_deopt_count() {
+ int value = counters();
+ int deopt_count = DeoptCountBits::decode(value);
+ deopt_count = (deopt_count + 1) & DeoptCountBits::kMax;
+ set_counters(DeoptCountBits::update(value, deopt_count));
+}
+
+
+int SharedFunctionInfo::opt_reenable_tries() {
+ return OptReenableTriesBits::decode(counters());
+}
+
+
+void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
+ set_counters(OptReenableTriesBits::update(counters(), tries));
+}
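
ic_age, deopt_count and opt_reenable_tries all share a single Smi-sized
counters() field, carved up by BitField-style helpers. A self-contained sketch
of that packing; the field widths and positions below are invented for
illustration, the real ones are defined in V8's objects.h:

#include <cassert>
#include <cstdio>

// Minimal BitField in the style of V8's helper: a <shift, size> window
// into a packed integer.
template <int shift, int size>
struct BitField {
  static const int kMax = (1 << size) - 1;
  static int decode(int value) { return (value >> shift) & kMax; }
  static int update(int value, int field) {
    return (value & ~(kMax << shift)) | ((field & kMax) << shift);
  }
};

typedef BitField<0, 8>  ICAgeBits;             // assumed width
typedef BitField<8, 8>  DeoptCountBits;        // assumed width
typedef BitField<16, 8> OptReenableTriesBits;  // assumed width

int main() {
  int counters = 0;
  counters = ICAgeBits::update(counters, 3);
  counters = DeoptCountBits::update(counters, 200);

  // Wrapping increment, as in increment_deopt_count() above.
  int deopts = (DeoptCountBits::decode(counters) + 1) & DeoptCountBits::kMax;
  counters = DeoptCountBits::update(counters, deopts);

  assert(ICAgeBits::decode(counters) == 3);    // neighbors stay untouched
  assert(DeoptCountBits::decode(counters) == 201);
  std::printf("packed counters = 0x%x\n", counters);
  return 0;
}
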
+
+
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
}
+void SharedFunctionInfo::TryReenableOptimization() {
+ int tries = opt_reenable_tries();
+ set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
+ // We reenable optimization whenever the number of tries is a large
+ // enough power of 2.
+ if (tries >= 16 && (((tries - 1) & tries) == 0)) {
+ set_optimization_disabled(false);
+ set_opt_count(0);
+ set_deopt_count(0);
+ code()->set_optimizable(true);
+ }
+}
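
The test ((tries - 1) & tries) == 0 is the standard power-of-two check, so
re-enabling fires only at tries 16, 32, 64, and so on: exponential backoff
between attempts. A quick standalone demonstration:

#include <cstdio>

// Re-enable condition from TryReenableOptimization: a power of two >= 16.
bool ShouldReenable(int tries) {
  return tries >= 16 && ((tries - 1) & tries) == 0;
}

int main() {
  // Prints 16, 32, 64, 128: attempts get exponentially rarer.
  for (int tries = 1; tries <= 128; ++tries)
    if (ShouldReenable(tries)) std::printf("reenable at tries=%d\n", tries);
  return 0;
}
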
+
+
bool JSFunction::IsBuiltin() {
- return context()->global()->IsJSBuiltinsObject();
+ return context()->global_object()->IsJSBuiltinsObject();
}
@@ -3991,6 +4398,18 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}
+bool JSFunction::IsMarkedForParallelRecompilation() {
+ return code() ==
+ GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
+}
+
+
+bool JSFunction::IsInRecompileQueue() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kInRecompileQueue);
+}
+
+
Code* JSFunction::code() {
return Code::cast(unchecked_code());
}
@@ -4022,10 +4441,10 @@ void JSFunction::ReplaceCode(Code* code) {
// Add/remove the function from the list of optimized functions for this
// context based on the state change.
if (!was_optimized && is_optimized) {
- context()->global_context()->AddOptimizedFunction(this);
+ context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
- context()->global_context()->RemoveOptimizedFunction(this);
+ context()->native_context()->RemoveOptimizedFunction(this);
}
}
@@ -4066,40 +4485,6 @@ void JSFunction::set_initial_map(Map* value) {
}
-MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
- Map* initial_map) {
- Context* global_context = context()->global_context();
- Object* array_function =
- global_context->get(Context::ARRAY_FUNCTION_INDEX);
- if (array_function->IsJSFunction() &&
- this == JSFunction::cast(array_function)) {
- ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-
- MaybeObject* maybe_map = initial_map->CopyDropTransitions();
- Map* new_double_map = NULL;
- if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
- new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
- maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
- new_double_map);
- if (maybe_map->IsFailure()) return maybe_map;
-
- maybe_map = new_double_map->CopyDropTransitions();
- Map* new_object_map = NULL;
- if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
- new_object_map->set_elements_kind(FAST_ELEMENTS);
- maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
- new_object_map);
- if (maybe_map->IsFailure()) return maybe_map;
-
- global_context->set_smi_js_array_map(initial_map);
- global_context->set_double_js_array_map(new_double_map);
- global_context->set_object_js_array_map(new_object_map);
- }
- set_initial_map(initial_map);
- return this;
-}
-
-
bool JSFunction::has_initial_map() {
return prototype_or_initial_map()->IsMap();
}
@@ -4132,6 +4517,7 @@ Object* JSFunction::prototype() {
return instance_prototype();
}
+
bool JSFunction::should_have_prototype() {
return map()->function_with_prototype();
}
@@ -4235,6 +4621,7 @@ void Foreign::set_foreign_address(Address value) {
ACCESSORS(JSModule, context, Object, kContextOffset)
+ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
JSModule* JSModule::cast(Object* obj) {
@@ -4429,18 +4816,18 @@ ElementsKind JSObject::GetElementsKind() {
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
Map* map = fixed_array->map();
- ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (kind == FAST_DOUBLE_ELEMENTS &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
+ ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (IsFastDoubleElementsKind(kind) &&
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
+ (kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
#endif
return kind;
}
@@ -4451,25 +4838,28 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
}
-bool JSObject::HasFastElements() {
- return GetElementsKind() == FAST_ELEMENTS;
+bool JSObject::HasFastObjectElements() {
+ return IsFastObjectElementsKind(GetElementsKind());
}
-bool JSObject::HasFastSmiOnlyElements() {
- return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
+bool JSObject::HasFastSmiElements() {
+ return IsFastSmiElementsKind(GetElementsKind());
}
-bool JSObject::HasFastTypeElements() {
- ElementsKind elements_kind = GetElementsKind();
- return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_ELEMENTS;
+bool JSObject::HasFastSmiOrObjectElements() {
+ return IsFastSmiOrObjectElementsKind(GetElementsKind());
}
bool JSObject::HasFastDoubleElements() {
- return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
+ return IsFastDoubleElementsKind(GetElementsKind());
+}
+
+
+bool JSObject::HasFastHoleyElements() {
+ return IsFastHoleyElementsKind(GetElementsKind());
}
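
These predicates are the fallout of splitting ElementsKind into packed/holey
pairs: each query now asks about a family of kinds rather than one enum value.
A sketch of such range-based predicates, assuming the packed/holey ordering
below (the real enum and helpers live in V8's elements-kind.h):

#include <cassert>

// Assumed ordering, modeled on V8's elements kinds of this era.
enum ElementsKind {
  FAST_SMI_ELEMENTS,          // packed Smis
  FAST_HOLEY_SMI_ELEMENTS,    // Smis with holes
  FAST_ELEMENTS,              // packed objects
  FAST_HOLEY_ELEMENTS,        // objects with holes
  FAST_DOUBLE_ELEMENTS,
  FAST_HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS
};

bool IsFastSmiElementsKind(ElementsKind k) {
  return k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS;
}

bool IsFastSmiOrObjectElementsKind(ElementsKind k) {
  return k >= FAST_SMI_ELEMENTS && k <= FAST_HOLEY_ELEMENTS;
}

bool IsFastHoleyElementsKind(ElementsKind k) {
  return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_ELEMENTS ||
         k == FAST_HOLEY_DOUBLE_ELEMENTS;
}

int main() {
  assert(IsFastSmiOrObjectElementsKind(FAST_HOLEY_ELEMENTS));
  assert(!IsFastSmiOrObjectElementsKind(FAST_DOUBLE_ELEMENTS));
  assert(IsFastHoleyElementsKind(FAST_HOLEY_SMI_ELEMENTS));
  return 0;
}
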
@@ -4526,7 +4916,7 @@ bool JSObject::HasIndexedInterceptor() {
MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastTypeElements());
+ ASSERT(HasFastSmiOrObjectElements());
FixedArray* elems = FixedArray::cast(elements());
Isolate* isolate = GetIsolate();
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4579,8 +4969,7 @@ StringHasher::StringHasher(int length, uint32_t seed)
raw_running_hash_(seed),
array_index_(0),
is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
- is_first_char_(true),
- is_valid_(true) {
+ is_first_char_(true) {
ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
}
@@ -4590,6 +4979,25 @@ bool StringHasher::has_trivial_hash() {
}
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint32_t c) {
+ running_hash += c;
+ running_hash += (running_hash << 10);
+ running_hash ^= (running_hash >> 6);
+ return running_hash;
+}
+
+
+uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
+ running_hash += (running_hash << 3);
+ running_hash ^= (running_hash >> 11);
+ running_hash += (running_hash << 15);
+ if ((running_hash & String::kHashBitMask) == 0) {
+ return kZeroHash;
+ }
+ return running_hash;
+}
+
+
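
AddCharacterCore and GetHashCore factor the Jenkins one-at-a-time hash into a
streaming step and a finalizer so that the indexed and no-index paths below
can share them. A standalone version of the same split; the zero-hash
substitute of 27 matches the constant the old inline code used, where V8 now
names it kZeroHash:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Jenkins one-at-a-time, factored like AddCharacterCore/GetHashCore.
uint32_t AddCharacterCore(uint32_t h, uint32_t c) {
  h += c;
  h += h << 10;
  h ^= h >> 6;
  return h;
}

uint32_t GetHashCore(uint32_t h) {
  h += h << 3;
  h ^= h >> 11;
  h += h << 15;
  // V8 masks with String::kHashBitMask and substitutes kZeroHash so that a
  // computed hash of zero (meaning "not computed yet") can never escape.
  return h == 0 ? 27 : h;
}

uint32_t HashString(const char* s, uint32_t seed) {
  uint32_t h = seed;
  for (size_t i = 0; i < std::strlen(s); ++i)
    h = AddCharacterCore(h, static_cast<uint8_t>(s[i]));
  return GetHashCore(h);
}

int main() {
  std::printf("%08x\n", HashString("hello", 0));
  std::printf("%08x\n", HashString("hello", 42));  // seed perturbs the hash
  return 0;
}
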
void StringHasher::AddCharacter(uint32_t c) {
if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
AddSurrogatePair(c); // Not inlined.
@@ -4597,9 +5005,7 @@ void StringHasher::AddCharacter(uint32_t c) {
}
// Use the Jenkins one-at-a-time hash function to update the hash
// for the given character.
- raw_running_hash_ += c;
- raw_running_hash_ += (raw_running_hash_ << 10);
- raw_running_hash_ ^= (raw_running_hash_ >> 6);
+ raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
// Incremental array index computation.
if (is_array_index_) {
if (c < '0' || c > '9') {
@@ -4629,23 +5035,14 @@ void StringHasher::AddCharacterNoIndex(uint32_t c) {
AddSurrogatePairNoIndex(c); // Not inlined.
return;
}
- raw_running_hash_ += c;
- raw_running_hash_ += (raw_running_hash_ << 10);
- raw_running_hash_ ^= (raw_running_hash_ >> 6);
+ raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
}
uint32_t StringHasher::GetHash() {
// Get the calculated raw hash value and do some more bit ops to distribute
// the hash further. Ensure that we never return zero as the hash value.
- uint32_t result = raw_running_hash_;
- result += (result << 3);
- result ^= (result >> 11);
- result += (result << 15);
- if ((result & String::kHashBitMask) == 0) {
- result = 27;
- }
- return result;
+ return GetHashCore(raw_running_hash_);
}
@@ -4675,7 +5072,12 @@ bool String::AsArrayIndex(uint32_t* index) {
Object* JSReceiver::GetPrototype() {
- return HeapObject::cast(this)->map()->prototype();
+ return map()->prototype();
+}
+
+
+Object* JSReceiver::GetConstructor() {
+ return map()->constructor();
}
@@ -4699,6 +5101,16 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
return GetPropertyAttributeWithReceiver(this, key);
}
+
+PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+ }
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, true);
+}
+
+
// TODO(504): this may be useful in other places too where JSGlobalProxy
// is used.
Object* JSObject::BypassGlobalProxy() {
@@ -4723,7 +5135,26 @@ bool JSReceiver::HasElement(uint32_t index) {
if (IsJSProxy()) {
return JSProxy::cast(this)->HasElementWithHandler(index);
}
- return JSObject::cast(this)->HasElementWithReceiver(this, index);
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, true) != ABSENT;
+}
+
+
+bool JSReceiver::HasLocalElement(uint32_t index) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->HasElementWithHandler(index);
+ }
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, false) != ABSENT;
+}
+
+
+PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+ }
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ this, index, false);
}
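
HasElement and HasLocalElement are now defined in terms of a single attribute
query: anything other than ABSENT counts as present, with proxies delegating
to their handler and plain objects inspecting their elements. A compressed
model of that dispatch (proxy handling elided, names invented):

#include <cassert>

enum PropertyAttributes { NONE = 0, READ_ONLY = 1, ABSENT = 64 };

struct Receiver {
  bool has_element;

  // A proxy would ask its handler here; a plain object inspects its
  // backing store. Both paths reduce to one attribute answer.
  PropertyAttributes GetElementAttribute(unsigned index,
                                         bool search_prototypes) {
    (void)index;
    (void)search_prototypes;
    return has_element ? NONE : ABSENT;
  }

  bool HasElement(unsigned index) {
    return GetElementAttribute(index, true) != ABSENT;   // walks prototypes
  }
  bool HasLocalElement(unsigned index) {
    return GetElementAttribute(index, false) != ABSENT;  // own elements only
  }
};

int main() {
  Receiver r = { true };
  assert(r.HasElement(0) && r.HasLocalElement(0));
  Receiver empty = { false };
  assert(!empty.HasElement(0));
  return 0;
}
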
@@ -4767,6 +5198,13 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
}
+bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
+ Object* function_template = expected_receiver_type();
+ if (!function_template->IsFunctionTemplateInfo()) return true;
+ return receiver->IsInstanceOf(FunctionTemplateInfo::cast(function_template));
+}
+
+
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
@@ -4780,7 +5218,9 @@ void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
Object* value,
PropertyDetails details) {
- ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
+ ASSERT(!key->IsString() ||
+ details.IsDeleted() ||
+ details.dictionary_index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
AssertNoAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
@@ -4878,13 +5318,13 @@ void Map::ClearCodeCache(Heap* heap) {
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
// - IncrementalMarking::Step
- ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
+ ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
+ WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
}
void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastTypeElements());
+ ASSERT(HasFastSmiOrObjectElements());
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
@@ -4916,13 +5356,13 @@ bool JSArray::AllowsSetElementsLength() {
MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
MaybeObject* maybe_result = EnsureCanContainElements(
- storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
+ storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
if (maybe_result->IsFailure()) return maybe_result;
ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
+ IsFastDoubleElementsKind(GetElementsKind())) ||
((storage->map() != GetHeap()->fixed_double_array_map()) &&
- ((GetElementsKind() == FAST_ELEMENTS) ||
- (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+ (IsFastObjectElementsKind(GetElementsKind()) ||
+ (IsFastSmiElementsKind(GetElementsKind()) &&
FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
set_length(Smi::FromInt(storage->length()));
@@ -4942,13 +5382,13 @@ MaybeObject* FixedDoubleArray::Copy() {
}
-void TypeFeedbackCells::SetAstId(int index, Smi* id) {
- set(1 + index * 2, id);
+void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
+ set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
-Smi* TypeFeedbackCells::AstId(int index) {
- return Smi::cast(get(1 + index * 2));
+TypeFeedbackId TypeFeedbackCells::AstId(int index) {
+ return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
}
@@ -4973,13 +5413,88 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->raw_unchecked_the_hole_value();
+ return heap->the_hole_value();
+}
+
+
+int TypeFeedbackInfo::ic_total_count() {
+ int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ return ICTotalCountField::decode(current);
+}
+
+
+void TypeFeedbackInfo::set_ic_total_count(int count) {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ value = ICTotalCountField::update(value,
+ ICTotalCountField::decode(count));
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
+}
+
+
+int TypeFeedbackInfo::ic_with_type_info_count() {
+ int current = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ return ICsWithTypeInfoCountField::decode(current);
+}
+
+
+void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
+  // We can get a negative count here when the type-feedback info is
+  // shared between two code objects. This can only happen when the
+  // debugger made a shallow copy of a code object (see Heap::CopyCode).
+ // Since we do not optimize when the debugger is active, we can skip
+ // this counter update.
+ if (new_count >= 0) {
+ new_count &= ICsWithTypeInfoCountField::kMask;
+ value = ICsWithTypeInfoCountField::update(value, new_count);
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
+ }
+}
+
+
+void TypeFeedbackInfo::initialize_storage() {
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
+}
+
+
+void TypeFeedbackInfo::change_own_type_change_checksum() {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int checksum = OwnTypeChangeChecksum::decode(value);
+ checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
+ value = OwnTypeChangeChecksum::update(value, checksum);
+ // Ensure packed bit field is in Smi range.
+ if (value > Smi::kMaxValue) value |= Smi::kMinValue;
+ if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
+ WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
+}
+
+
+void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int mask = (1 << kTypeChangeChecksumBits) - 1;
+ value = InlinedTypeChangeChecksum::update(value, checksum & mask);
+ // Ensure packed bit field is in Smi range.
+ if (value > Smi::kMaxValue) value |= Smi::kMinValue;
+ if (value < Smi::kMinValue) value &= ~Smi::kMinValue;
+ WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(value));
+}
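
Both checksum setters must keep the packed value inside Smi range, because it
is stored as a Smi (31 payload bits on 32-bit targets). A standalone
demonstration of the same fold, assuming 31-bit Smis:

#include <cassert>
#include <cstdio>

// Assumed 31-bit Smi limits (32-bit V8): payload is 31 signed bits.
const int kSmiMaxValue = (1 << 30) - 1;
const int kSmiMinValue = -(1 << 30);

// Fold an arbitrary int back into Smi range, as the setters above do.
int ClampToSmi(int value) {
  if (value > kSmiMaxValue) value |= kSmiMinValue;   // force the sign bits
  if (value < kSmiMinValue) value &= ~kSmiMinValue;  // clear them again
  return value;
}

int main() {
  assert(ClampToSmi(123) == 123);  // in-range values pass through
  int folded = ClampToSmi(kSmiMaxValue + 1);
  assert(folded >= kSmiMinValue && folded <= kSmiMaxValue);
  std::printf("%d -> %d\n", kSmiMaxValue + 1, folded);
  return 0;
}
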
+
+
+int TypeFeedbackInfo::own_type_change_checksum() {
+ int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ return OwnTypeChangeChecksum::decode(value);
+}
+
+
+bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
+ int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int mask = (1 << kTypeChangeChecksumBits) - 1;
+ return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
}
-SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
- kIcWithTypeinfoCountOffset)
ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
@@ -5049,14 +5564,13 @@ void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
template<int start_offset, int end_offset, int size>
void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
HeapObject* obj,
ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, end_offset));
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
}
@@ -5064,10 +5578,10 @@ template<int start_offset>
void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
int object_size,
ObjectVisitor* v) {
- v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, object_size));
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, object_size));
}
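
The SLOT_ADDR macro gives way to HeapObject::RawField, but the shape is
unchanged: a body descriptor hands the visitor a [start, end) range of tagged
slots inside the object. A skeletal model of that pattern with stand-in types:

#include <cstdio>

struct Object;

struct ObjectVisitor {
  virtual void VisitPointers(Object** start, Object** end) = 0;
  virtual ~ObjectVisitor() {}
};

struct FakeHeapObject {
  Object* slots[4];  // pretend these are the object's tagged fields
  // RawField analogue: a typed pointer to the slot at byte offset 'offset'.
  Object** RawField(int offset) {
    return reinterpret_cast<Object**>(
        reinterpret_cast<char*>(slots) + offset);
  }
};

// FixedBodyDescriptor analogue: visit slots in [start_offset, end_offset).
template <int start_offset, int end_offset>
void IterateBody(FakeHeapObject* obj, ObjectVisitor* v) {
  v->VisitPointers(obj->RawField(start_offset), obj->RawField(end_offset));
}

struct CountingVisitor : ObjectVisitor {
  int count = 0;
  void VisitPointers(Object** start, Object** end) override {
    count += static_cast<int>(end - start);
  }
};

int main() {
  FakeHeapObject obj = {};
  CountingVisitor v;
  IterateBody<0, static_cast<int>(sizeof(Object*)) * 4>(&obj, &v);
  std::printf("visited %d slots\n", v.count);  // prints 4
  return 0;
}
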
-#undef SLOT_ADDR
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
diff --git a/src/3rdparty/v8/src/objects-printer.cc b/src/3rdparty/v8/src/objects-printer.cc
index febdaab..6e87c7a 100644
--- a/src/3rdparty/v8/src/objects-printer.cc
+++ b/src/3rdparty/v8/src/objects-printer.cc
@@ -57,12 +57,12 @@ void MaybeObject::Print(FILE* out) {
void MaybeObject::PrintLn(FILE* out) {
Print(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void HeapObject::PrintHeader(FILE* out, const char* id) {
- PrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
+ FPrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
}
@@ -124,7 +124,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
break;
case FILLER_TYPE:
- PrintF(out, "filler");
+ FPrintF(out, "filler");
break;
case JS_OBJECT_TYPE: // fall through
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -151,7 +151,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out);
break;
case JS_VALUE_TYPE:
- PrintF(out, "Value wrapper around:");
+ FPrintF(out, "Value wrapper around:");
JSValue::cast(this)->value()->Print(out);
break;
case JS_DATE_TYPE:
@@ -189,7 +189,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
#undef MAKE_STRUCT_CASE
default:
- PrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
+ FPrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
UNREACHABLE();
break;
}
@@ -197,113 +197,88 @@ void HeapObject::HeapObjectPrint(FILE* out) {
void ByteArray::ByteArrayPrint(FILE* out) {
- PrintF(out, "byte array, data starts at %p", GetDataStartAddress());
+ FPrintF(out, "byte array, data starts at %p", GetDataStartAddress());
}
void FreeSpace::FreeSpacePrint(FILE* out) {
- PrintF(out, "free space, size %d", Size());
+ FPrintF(out, "free space, size %d", Size());
}
void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
- PrintF(out, "external pixel array");
+ FPrintF(out, "external pixel array");
}
void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
- PrintF(out, "external byte array");
+ FPrintF(out, "external byte array");
}
void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
- PrintF(out, "external unsigned byte array");
+ FPrintF(out, "external unsigned byte array");
}
void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
- PrintF(out, "external short array");
+ FPrintF(out, "external short array");
}
void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
- PrintF(out, "external unsigned short array");
+ FPrintF(out, "external unsigned short array");
}
void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
- PrintF(out, "external int array");
+ FPrintF(out, "external int array");
}
void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
- PrintF(out, "external unsigned int array");
+ FPrintF(out, "external unsigned int array");
}
void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
- PrintF(out, "external float array");
+ FPrintF(out, "external float array");
}
void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
- PrintF(out, "external double array");
+ FPrintF(out, "external double array");
}
void JSObject::PrintProperties(FILE* out) {
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PrintF(out, " ");
+ for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
+ FPrintF(out, " ");
descs->GetKey(i)->StringPrint(out);
- PrintF(out, ": ");
+ FPrintF(out, ": ");
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
FastPropertyAt(index)->ShortPrint(out);
- PrintF(out, " (field at offset %d)\n", index);
+ FPrintF(out, " (field at offset %d)\n", index);
break;
}
case CONSTANT_FUNCTION:
descs->GetConstantFunction(i)->ShortPrint(out);
- PrintF(out, " (constant function)\n");
+ FPrintF(out, " (constant function)\n");
break;
case CALLBACKS:
descs->GetCallbacksObject(i)->ShortPrint(out);
- PrintF(out, " (callback)\n");
- break;
- case ELEMENTS_TRANSITION: {
- PrintF(out, "(elements transition to ");
- Object* descriptor_contents = descs->GetValue(i);
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- PrintElementsKind(out, map->elements_kind());
- } else {
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Map* map = Map::cast(map_array->get(i));
- if (i != 0) {
- PrintF(out, ", ");
- }
- PrintElementsKind(out, map->elements_kind());
- }
- }
- PrintF(out, ")\n");
- break;
- }
- case MAP_TRANSITION:
- PrintF(out, "(map transition)\n");
- break;
- case CONSTANT_TRANSITION:
- PrintF(out, "(constant transition)\n");
- break;
- case NULL_DESCRIPTOR:
- PrintF(out, "(null descriptor)\n");
+ FPrintF(out, " (callback)\n");
break;
case NORMAL: // only in slow mode
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
+ // There are no transitions in the descriptor array.
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -318,28 +293,31 @@ void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
switch (map()->elements_kind()) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: ", i);
+ FPrintF(out, " %d: ", i);
p->get(i)->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
break;
}
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// Print in array notation for non-sparse arrays.
if (elements()->length() > 0) {
FixedDoubleArray* p = FixedDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
if (p->is_the_hole(i)) {
- PrintF(out, " %d: <the hole>", i);
+ FPrintF(out, " %d: <the hole>", i);
} else {
- PrintF(out, " %d: %g", i, p->get_scalar(i));
+ FPrintF(out, " %d: %g", i, p->get_scalar(i));
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
}
break;
@@ -347,14 +325,14 @@ void JSObject::PrintElements(FILE* out) {
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* p = ExternalPixelArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, p->get_scalar(i));
+ FPrintF(out, " %d: %d\n", i, p->get_scalar(i));
}
break;
}
case EXTERNAL_BYTE_ELEMENTS: {
ExternalByteArray* p = ExternalByteArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
@@ -362,14 +340,14 @@ void JSObject::PrintElements(FILE* out) {
ExternalUnsignedByteArray* p =
ExternalUnsignedByteArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
case EXTERNAL_SHORT_ELEMENTS: {
ExternalShortArray* p = ExternalShortArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
@@ -377,14 +355,14 @@ void JSObject::PrintElements(FILE* out) {
ExternalUnsignedShortArray* p =
ExternalUnsignedShortArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
case EXTERNAL_INT_ELEMENTS: {
ExternalIntArray* p = ExternalIntArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
@@ -392,21 +370,21 @@ void JSObject::PrintElements(FILE* out) {
ExternalUnsignedIntArray* p =
ExternalUnsignedIntArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
+ FPrintF(out, " %d: %d\n", i, static_cast<int>(p->get_scalar(i)));
}
break;
}
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalFloatArray* p = ExternalFloatArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
}
break;
}
case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
}
break;
}
@@ -416,9 +394,9 @@ void JSObject::PrintElements(FILE* out) {
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
for (int i = 2; i < p->length(); i++) {
- PrintF(out, " %d: ", i);
+ FPrintF(out, " %d: ", i);
p->get(i)->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
break;
}
@@ -426,32 +404,66 @@ void JSObject::PrintElements(FILE* out) {
}
+void JSObject::PrintTransitions(FILE* out) {
+ if (!map()->HasTransitionArray()) return;
+ TransitionArray* transitions = map()->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ FPrintF(out, " ");
+ transitions->GetKey(i)->StringPrint(out);
+ FPrintF(out, ": ");
+ switch (transitions->GetTargetDetails(i).type()) {
+ case FIELD: {
+ FPrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ FPrintF(out, " (transition to constant function)\n");
+ break;
+ case CALLBACKS:
+ FPrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
void JSObject::JSObjectPrint(FILE* out) {
- PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
- PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+ FPrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
+ FPrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
PrintElementsKind(out, this->map()->elements_kind());
- PrintF(out,
+ FPrintF(out,
"]\n - prototype = %p\n",
reinterpret_cast<void*>(GetPrototype()));
- PrintF(out, " {\n");
+ FPrintF(out, " {\n");
PrintProperties(out);
+ PrintTransitions(out);
PrintElements(out);
- PrintF(out, " }\n");
+ FPrintF(out, " }\n");
}
void JSModule::JSModulePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSModule");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - context = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - context = ");
context()->Print(out);
+ FPrintF(out, " - scope_info = ");
+ scope_info()->ShortPrint(out);
PrintElementsKind(out, this->map()->elements_kind());
- PrintF(out, " {\n");
+ FPrintF(out, " {\n");
PrintProperties(out);
PrintElements(out);
- PrintF(out, " }\n");
+ FPrintF(out, " }\n");
}
@@ -524,95 +536,105 @@ static const char* TypeToString(InstanceType type) {
void Map::MapPrint(FILE* out) {
HeapObject::PrintHeader(out, "Map");
- PrintF(out, " - type: %s\n", TypeToString(instance_type()));
- PrintF(out, " - instance size: %d\n", instance_size());
- PrintF(out, " - inobject properties: %d\n", inobject_properties());
- PrintF(out, " - elements kind: ");
+ FPrintF(out, " - type: %s\n", TypeToString(instance_type()));
+ FPrintF(out, " - instance size: %d\n", instance_size());
+ FPrintF(out, " - inobject properties: %d\n", inobject_properties());
+ FPrintF(out, " - elements kind: ");
PrintElementsKind(out, elements_kind());
- PrintF(out, "\n - pre-allocated property fields: %d\n",
+ FPrintF(out, "\n - pre-allocated property fields: %d\n",
pre_allocated_property_fields());
- PrintF(out, " - unused property fields: %d\n", unused_property_fields());
+ FPrintF(out, " - unused property fields: %d\n", unused_property_fields());
if (is_hidden_prototype()) {
- PrintF(out, " - hidden_prototype\n");
+ FPrintF(out, " - hidden_prototype\n");
}
if (has_named_interceptor()) {
- PrintF(out, " - named_interceptor\n");
+ FPrintF(out, " - named_interceptor\n");
}
if (has_indexed_interceptor()) {
- PrintF(out, " - indexed_interceptor\n");
+ FPrintF(out, " - indexed_interceptor\n");
}
if (is_undetectable()) {
- PrintF(out, " - undetectable\n");
+ FPrintF(out, " - undetectable\n");
}
if (has_instance_call_handler()) {
- PrintF(out, " - instance_call_handler\n");
+ FPrintF(out, " - instance_call_handler\n");
}
if (is_access_check_needed()) {
- PrintF(out, " - access_check_needed\n");
+ FPrintF(out, " - access_check_needed\n");
}
- PrintF(out, " - instance descriptors: ");
+ FPrintF(out, " - back pointer: ");
+ GetBackPointer()->ShortPrint(out);
+ FPrintF(out, "\n - instance descriptors %i #%i: ",
+ owns_descriptors(),
+ NumberOfOwnDescriptors());
instance_descriptors()->ShortPrint(out);
- PrintF(out, "\n - prototype: ");
+ if (HasTransitionArray()) {
+ FPrintF(out, "\n - transitions: ");
+ transitions()->ShortPrint(out);
+ }
+ FPrintF(out, "\n - prototype: ");
prototype()->ShortPrint(out);
- PrintF(out, "\n - constructor: ");
+ FPrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n - code cache: ");
+ code_cache()->ShortPrint(out);
+ FPrintF(out, "\n");
}
void CodeCache::CodeCachePrint(FILE* out) {
HeapObject::PrintHeader(out, "CodeCache");
- PrintF(out, "\n - default_cache: ");
+ FPrintF(out, "\n - default_cache: ");
default_cache()->ShortPrint(out);
- PrintF(out, "\n - normal_type_cache: ");
+ FPrintF(out, "\n - normal_type_cache: ");
normal_type_cache()->ShortPrint(out);
}
void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
HeapObject::PrintHeader(out, "PolymorphicCodeCache");
- PrintF(out, "\n - cache: ");
+ FPrintF(out, "\n - cache: ");
cache()->ShortPrint(out);
}
void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
- PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
+ FPrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, "\n - type_feedback_cells: ");
+ FPrintF(out, " - type_feedback_cells: ");
type_feedback_cells()->FixedArrayPrint(out);
}
void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
- PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
+ FPrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
}
void FixedArray::FixedArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedArray");
- PrintF(out, " - length: %d", length());
+ FPrintF(out, " - length: %d", length());
for (int i = 0; i < length(); i++) {
- PrintF(out, "\n [%d]: ", i);
+ FPrintF(out, "\n [%d]: ", i);
get(i)->ShortPrint(out);
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedDoubleArray");
- PrintF(out, " - length: %d", length());
+ FPrintF(out, " - length: %d", length());
for (int i = 0; i < length(); i++) {
if (is_the_hole(i)) {
- PrintF(out, "\n [%d]: <the hole>", i);
+ FPrintF(out, "\n [%d]: <the hole>", i);
} else {
- PrintF(out, "\n [%d]: %g", i, get_scalar(i));
+ FPrintF(out, "\n [%d]: %g", i, get_scalar(i));
}
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
@@ -624,29 +646,29 @@ void JSValue::JSValuePrint(FILE* out) {
void JSMessageObject::JSMessageObjectPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSMessageObject");
- PrintF(out, " - type: ");
+ FPrintF(out, " - type: ");
type()->ShortPrint(out);
- PrintF(out, "\n - arguments: ");
+ FPrintF(out, "\n - arguments: ");
arguments()->ShortPrint(out);
- PrintF(out, "\n - start_position: %d", start_position());
- PrintF(out, "\n - end_position: %d", end_position());
- PrintF(out, "\n - script: ");
+ FPrintF(out, "\n - start_position: %d", start_position());
+ FPrintF(out, "\n - end_position: %d", end_position());
+ FPrintF(out, "\n - script: ");
script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
+ FPrintF(out, "\n - stack_trace: ");
stack_trace()->ShortPrint(out);
- PrintF(out, "\n - stack_frames: ");
+ FPrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void String::StringPrint(FILE* out) {
if (StringShape(this).IsSymbol()) {
- PrintF(out, "#");
+ FPrintF(out, "#");
} else if (StringShape(this).IsCons()) {
- PrintF(out, "c\"");
+ FPrintF(out, "c\"");
} else {
- PrintF(out, "\"");
+ FPrintF(out, "\"");
}
const char truncated_epilogue[] = "...<truncated>";
@@ -657,13 +679,13 @@ void String::StringPrint(FILE* out) {
}
}
for (int i = 0; i < len; i++) {
- PrintF(out, "%c", Get(i));
+ FPrintF(out, "%c", Get(i));
}
if (len != length()) {
- PrintF(out, "%s", truncated_epilogue);
+ FPrintF(out, "%s", truncated_epilogue);
}
- if (!StringShape(this).IsSymbol()) PrintF(out, "\"");
+ if (!StringShape(this).IsSymbol()) FPrintF(out, "\"");
}
@@ -688,13 +710,13 @@ static const char* const weekdays[] = {
void JSDate::JSDatePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSDate");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - value = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - value = ");
value()->Print(out);
if (!year()->IsSmi()) {
- PrintF(out, " - time = NaN\n");
+ FPrintF(out, " - time = NaN\n");
} else {
- PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+ FPrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
year()->IsSmi() ? Smi::cast(year())->value() : -1,
month()->IsSmi() ? Smi::cast(month())->value() : -1,
@@ -708,110 +730,121 @@ void JSDate::JSDatePrint(FILE* out) {
void JSProxy::JSProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSProxy");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - handler = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - handler = ");
handler()->Print(out);
- PrintF(out, " - hash = ");
+ FPrintF(out, " - hash = ");
hash()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSFunctionProxy");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - handler = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - handler = ");
handler()->Print(out);
- PrintF(out, " - call_trap = ");
+ FPrintF(out, " - call_trap = ");
call_trap()->Print(out);
- PrintF(out, " - construct_trap = ");
+ FPrintF(out, " - construct_trap = ");
construct_trap()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void JSWeakMap::JSWeakMapPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSWeakMap");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - table = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - table = ");
table()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
- PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
- PrintF(out, " - initial_map = ");
+ FPrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ FPrintF(out, " - initial_map = ");
if (has_initial_map()) {
initial_map()->ShortPrint(out);
}
- PrintF(out, "\n - shared_info = ");
+ FPrintF(out, "\n - shared_info = ");
shared()->ShortPrint(out);
- PrintF(out, "\n - name = ");
+ FPrintF(out, "\n - name = ");
shared()->name()->Print(out);
- PrintF(out, "\n - context = ");
+ FPrintF(out, "\n - context = ");
unchecked_context()->ShortPrint(out);
- PrintF(out, "\n - code = ");
+ FPrintF(out, "\n - literals = ");
+ literals()->ShortPrint(out);
+ FPrintF(out, "\n - code = ");
code()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
PrintProperties(out);
PrintElements(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "SharedFunctionInfo");
- PrintF(out, " - name: ");
+ FPrintF(out, " - name: ");
name()->ShortPrint(out);
- PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
- PrintF(out, "\n - instance class name = ");
+ FPrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
+ FPrintF(out, "\n - instance class name = ");
instance_class_name()->Print(out);
- PrintF(out, "\n - code = ");
+ FPrintF(out, "\n - code = ");
code()->ShortPrint(out);
- PrintF(out, "\n - source code = ");
- GetSourceCode()->ShortPrint(out);
+ if (HasSourceCode()) {
+ FPrintF(out, "\n - source code = ");
+ String* source = String::cast(Script::cast(script())->source());
+ int start = start_position();
+ int length = end_position() - start;
+ SmartArrayPointer<char> source_string =
+ source->ToCString(DISALLOW_NULLS,
+ FAST_STRING_TRAVERSAL,
+ start, length, NULL);
+ FPrintF(out, "%s", *source_string);
+ }
// Script files are often large, hard to read.
- // PrintF(out, "\n - script =");
+ // FPrintF(out, "\n - script =");
// script()->Print(out);
- PrintF(out, "\n - function token position = %d", function_token_position());
- PrintF(out, "\n - start position = %d", start_position());
- PrintF(out, "\n - end position = %d", end_position());
- PrintF(out, "\n - is expression = %d", is_expression());
- PrintF(out, "\n - debug info = ");
+ FPrintF(out, "\n - function token position = %d", function_token_position());
+ FPrintF(out, "\n - start position = %d", start_position());
+ FPrintF(out, "\n - end position = %d", end_position());
+ FPrintF(out, "\n - is expression = %d", is_expression());
+ FPrintF(out, "\n - debug info = ");
debug_info()->ShortPrint(out);
- PrintF(out, "\n - length = %d", length());
- PrintF(out, "\n - has_only_simple_this_property_assignments = %d",
+ FPrintF(out, "\n - length = %d", length());
+ FPrintF(out, "\n - has_only_simple_this_property_assignments = %d",
has_only_simple_this_property_assignments());
- PrintF(out, "\n - this_property_assignments = ");
+ FPrintF(out, "\n - this_property_assignments = ");
this_property_assignments()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
- PrintF(out, "global_proxy");
+ FPrintF(out, "global_proxy ");
JSObjectPrint(out);
- PrintF(out, "context : ");
- context()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "native context : ");
+ native_context()->ShortPrint(out);
+ FPrintF(out, "\n");
}
void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
- PrintF(out, "global ");
+ FPrintF(out, "global ");
JSObjectPrint(out);
- PrintF(out, "global context : ");
- global_context()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "native context : ");
+ native_context()->ShortPrint(out);
+ FPrintF(out, "\n");
}
void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
- PrintF(out, "builtins ");
+ FPrintF(out, "builtins ");
JSObjectPrint(out);
}
@@ -832,204 +865,235 @@ void Code::CodePrint(FILE* out) {
void Foreign::ForeignPrint(FILE* out) {
- PrintF(out, "foreign address : %p", foreign_address());
+ FPrintF(out, "foreign address : %p", foreign_address());
}
void AccessorInfo::AccessorInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessorInfo");
- PrintF(out, "\n - getter: ");
+ FPrintF(out, "\n - getter: ");
getter()->ShortPrint(out);
- PrintF(out, "\n - setter: ");
+ FPrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
- PrintF(out, "\n - name: ");
+ FPrintF(out, "\n - name: ");
name()->ShortPrint(out);
- PrintF(out, "\n - data: ");
+ FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
- PrintF(out, "\n - flag: ");
+ FPrintF(out, "\n - flag: ");
flag()->ShortPrint(out);
}
void AccessorPair::AccessorPairPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessorPair");
- PrintF(out, "\n - getter: ");
+ FPrintF(out, "\n - getter: ");
getter()->ShortPrint(out);
- PrintF(out, "\n - setter: ");
+ FPrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
}
void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessCheckInfo");
- PrintF(out, "\n - named_callback: ");
+ FPrintF(out, "\n - named_callback: ");
named_callback()->ShortPrint(out);
- PrintF(out, "\n - indexed_callback: ");
+ FPrintF(out, "\n - indexed_callback: ");
indexed_callback()->ShortPrint(out);
- PrintF(out, "\n - data: ");
+ FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
}
void InterceptorInfo::InterceptorInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "InterceptorInfo");
- PrintF(out, "\n - getter: ");
+ FPrintF(out, "\n - getter: ");
getter()->ShortPrint(out);
- PrintF(out, "\n - setter: ");
+ FPrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
- PrintF(out, "\n - query: ");
+ FPrintF(out, "\n - query: ");
query()->ShortPrint(out);
- PrintF(out, "\n - deleter: ");
+ FPrintF(out, "\n - deleter: ");
deleter()->ShortPrint(out);
- PrintF(out, "\n - enumerator: ");
+ FPrintF(out, "\n - enumerator: ");
enumerator()->ShortPrint(out);
- PrintF(out, "\n - data: ");
+ FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
}
void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "CallHandlerInfo");
- PrintF(out, "\n - callback: ");
+ FPrintF(out, "\n - callback: ");
callback()->ShortPrint(out);
- PrintF(out, "\n - data: ");
+ FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
- PrintF(out, "\n - call_stub_cache: ");
+ FPrintF(out, "\n - call_stub_cache: ");
}
void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "FunctionTemplateInfo");
- PrintF(out, "\n - class name: ");
+ FPrintF(out, "\n - class name: ");
class_name()->ShortPrint(out);
- PrintF(out, "\n - tag: ");
+ FPrintF(out, "\n - tag: ");
tag()->ShortPrint(out);
- PrintF(out, "\n - property_list: ");
+ FPrintF(out, "\n - property_list: ");
property_list()->ShortPrint(out);
- PrintF(out, "\n - serial_number: ");
+ FPrintF(out, "\n - serial_number: ");
serial_number()->ShortPrint(out);
- PrintF(out, "\n - call_code: ");
+ FPrintF(out, "\n - call_code: ");
call_code()->ShortPrint(out);
- PrintF(out, "\n - property_accessors: ");
+ FPrintF(out, "\n - property_accessors: ");
property_accessors()->ShortPrint(out);
- PrintF(out, "\n - prototype_template: ");
+ FPrintF(out, "\n - prototype_template: ");
prototype_template()->ShortPrint(out);
- PrintF(out, "\n - parent_template: ");
+ FPrintF(out, "\n - parent_template: ");
parent_template()->ShortPrint(out);
- PrintF(out, "\n - named_property_handler: ");
+ FPrintF(out, "\n - named_property_handler: ");
named_property_handler()->ShortPrint(out);
- PrintF(out, "\n - indexed_property_handler: ");
+ FPrintF(out, "\n - indexed_property_handler: ");
indexed_property_handler()->ShortPrint(out);
- PrintF(out, "\n - instance_template: ");
+ FPrintF(out, "\n - instance_template: ");
instance_template()->ShortPrint(out);
- PrintF(out, "\n - signature: ");
+ FPrintF(out, "\n - signature: ");
signature()->ShortPrint(out);
- PrintF(out, "\n - access_check_info: ");
+ FPrintF(out, "\n - access_check_info: ");
access_check_info()->ShortPrint(out);
- PrintF(out, "\n - hidden_prototype: %s",
+ FPrintF(out, "\n - hidden_prototype: %s",
hidden_prototype() ? "true" : "false");
- PrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
- PrintF(out, "\n - need_access_check: %s",
+ FPrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
+ FPrintF(out, "\n - need_access_check: %s",
needs_access_check() ? "true" : "false");
}
void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "ObjectTemplateInfo");
- PrintF(out, " - tag: ");
+ FPrintF(out, " - tag: ");
tag()->ShortPrint(out);
- PrintF(out, "\n - property_list: ");
+ FPrintF(out, "\n - property_list: ");
property_list()->ShortPrint(out);
- PrintF(out, "\n - constructor: ");
+ FPrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
- PrintF(out, "\n - internal_field_count: ");
+ FPrintF(out, "\n - internal_field_count: ");
internal_field_count()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
void SignatureInfo::SignatureInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "SignatureInfo");
- PrintF(out, "\n - receiver: ");
+ FPrintF(out, "\n - receiver: ");
receiver()->ShortPrint(out);
- PrintF(out, "\n - args: ");
+ FPrintF(out, "\n - args: ");
args()->ShortPrint(out);
}
void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeSwitchInfo");
- PrintF(out, "\n - types: ");
+ FPrintF(out, "\n - types: ");
types()->ShortPrint(out);
}
void Script::ScriptPrint(FILE* out) {
HeapObject::PrintHeader(out, "Script");
- PrintF(out, "\n - source: ");
+ FPrintF(out, "\n - source: ");
source()->ShortPrint(out);
- PrintF(out, "\n - name: ");
+ FPrintF(out, "\n - name: ");
name()->ShortPrint(out);
- PrintF(out, "\n - line_offset: ");
+ FPrintF(out, "\n - line_offset: ");
line_offset()->ShortPrint(out);
- PrintF(out, "\n - column_offset: ");
+ FPrintF(out, "\n - column_offset: ");
column_offset()->ShortPrint(out);
- PrintF(out, "\n - type: ");
+ FPrintF(out, "\n - type: ");
type()->ShortPrint(out);
- PrintF(out, "\n - id: ");
+ FPrintF(out, "\n - id: ");
id()->ShortPrint(out);
- PrintF(out, "\n - data: ");
+ FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
- PrintF(out, "\n - context data: ");
+ FPrintF(out, "\n - context data: ");
context_data()->ShortPrint(out);
- PrintF(out, "\n - wrapper: ");
+ FPrintF(out, "\n - wrapper: ");
wrapper()->ShortPrint(out);
- PrintF(out, "\n - compilation type: ");
+ FPrintF(out, "\n - compilation type: ");
compilation_type()->ShortPrint(out);
- PrintF(out, "\n - line ends: ");
+ FPrintF(out, "\n - line ends: ");
line_ends()->ShortPrint(out);
- PrintF(out, "\n - eval from shared: ");
+ FPrintF(out, "\n - eval from shared: ");
eval_from_shared()->ShortPrint(out);
- PrintF(out, "\n - eval from instructions offset: ");
+ FPrintF(out, "\n - eval from instructions offset: ");
eval_from_instructions_offset()->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "DebugInfo");
- PrintF(out, "\n - shared: ");
+ FPrintF(out, "\n - shared: ");
shared()->ShortPrint(out);
- PrintF(out, "\n - original_code: ");
+ FPrintF(out, "\n - original_code: ");
original_code()->ShortPrint(out);
- PrintF(out, "\n - code: ");
+ FPrintF(out, "\n - code: ");
code()->ShortPrint(out);
- PrintF(out, "\n - break_points: ");
+ FPrintF(out, "\n - break_points: ");
break_points()->Print(out);
}
void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "BreakPointInfo");
- PrintF(out, "\n - code_position: %d", code_position()->value());
- PrintF(out, "\n - source_position: %d", source_position()->value());
- PrintF(out, "\n - statement_position: %d", statement_position()->value());
- PrintF(out, "\n - break_point_objects: ");
+ FPrintF(out, "\n - code_position: %d", code_position()->value());
+ FPrintF(out, "\n - source_position: %d", source_position()->value());
+ FPrintF(out, "\n - statement_position: %d", statement_position()->value());
+ FPrintF(out, "\n - break_point_objects: ");
break_point_objects()->ShortPrint(out);
}
#endif // ENABLE_DEBUGGER_SUPPORT
void DescriptorArray::PrintDescriptors(FILE* out) {
- PrintF(out, "Descriptor array %d\n", number_of_descriptors());
+ FPrintF(out, "Descriptor array %d\n", number_of_descriptors());
for (int i = 0; i < number_of_descriptors(); i++) {
- PrintF(out, " %d: ", i);
+ FPrintF(out, " %d: ", i);
Descriptor desc;
Get(i, &desc);
desc.Print(out);
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
+}
+
+
+void TransitionArray::PrintTransitions(FILE* out) {
+ FPrintF(out, "Transition array %d\n", number_of_transitions());
+ for (int i = 0; i < number_of_transitions(); i++) {
+ FPrintF(out, " %d: ", i);
+ GetKey(i)->StringPrint(out);
+ FPrintF(out, ": ");
+ switch (GetTargetDetails(i).type()) {
+ case FIELD: {
+ FPrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ FPrintF(out, " (transition to constant function)\n");
+ break;
+ case CALLBACKS:
+ FPrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+ FPrintF(out, "\n");
}
diff --git a/src/3rdparty/v8/src/objects-visiting-inl.h b/src/3rdparty/v8/src/objects-visiting-inl.h
index 8ba92f7..71635ca 100644
--- a/src/3rdparty/v8/src/objects-visiting-inl.h
+++ b/src/3rdparty/v8/src/objects-visiting-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -56,7 +56,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(kVisitGlobalContext,
+ table_.Register(kVisitNativeContext,
&FixedBodyVisitor<StaticVisitor,
Context::ScavengeBodyDescriptor,
int>::Visit);
@@ -93,6 +93,537 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
}
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitSlicedString,
+ &FixedBodyVisitor<StaticVisitor,
+ SlicedString::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitNativeContext, &VisitNativeContext);
+
+ table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
+
+ table_.Register(kVisitJSWeakMap, &StaticVisitor::VisitJSWeakMap);
+
+ table_.Register(kVisitOddball,
+ &FixedBodyVisitor<StaticVisitor,
+ Oddball::BodyDescriptor,
+ void>::Visit);
+
+ table_.Register(kVisitMap, &VisitMap);
+
+ table_.Register(kVisitCode, &VisitCode);
+
+ table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
+
+ table_.Register(kVisitJSFunction, &VisitJSFunction);
+
+ // Registration for kVisitJSRegExp is done by StaticVisitor.
+
+ table_.Register(kVisitPropertyCell,
+ &FixedBodyVisitor<StaticVisitor,
+ JSGlobalPropertyCell::BodyDescriptor,
+ void>::Visit);
+
+ table_.template RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+
+ table_.template RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+
+ table_.template RegisterSpecializations<StructObjectVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
+ Heap* heap, Address entry_address) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+ StaticVisitor::MarkObject(heap, code);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ ASSERT(!rinfo->target_object()->IsConsString());
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ StaticVisitor::MarkObject(heap, object);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitGlobalPropertyCell(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ JSGlobalPropertyCell* cell = rinfo->target_cell();
+ StaticVisitor::MarkObject(heap, cell);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ // Monomorphic ICs are preserved when possible, but need to be flushed
+ // when they might be keeping a Context alive, or when the heap is about
+ // to be serialized.
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+ && (target->ic_state() == MEGAMORPHIC || heap->flush_monomorphic_ics() ||
+ Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
+ IC::Clear(rinfo->pc());
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ }
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
+ Heap* heap, RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* target = rinfo->code_age_stub();
+ ASSERT(target != NULL);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ StaticVisitor::MarkObject(heap, target);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
+ Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticVisitor,
+ Context::MarkCompactBodyDescriptor,
+ void>::Visit(map, object);
+
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::NATIVE_CONTEXT_SLOTS;
+ ++idx) {
+ Object** slot =
+ HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ collector->RecordSlot(slot, slot, *slot);
+ }
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitMap(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ Map* map_object = Map::cast(object);
+
+ // Clears the cache of ICs related to this map.
+ if (FLAG_cleanup_code_caches_at_gc) {
+ map_object->ClearCodeCache(heap);
+ }
+
+ // When map collection is enabled we have to mark through map's
+ // transitions and back pointers in a special way to make these links
+ // weak. Only maps for subclasses of JSReceiver can have transitions.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (FLAG_collect_maps &&
+ map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ MarkMapContents(heap, map_object);
+ } else {
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ }
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCode(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ Code* code = Code::cast(object);
+ if (FLAG_cleanup_code_caches_at_gc) {
+ code->ClearTypeFeedbackCells(heap);
+ }
+ if (FLAG_age_code && !Serializer::enabled()) {
+ code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+ }
+ code->CodeIterateBody<StaticVisitor>(heap);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
+ }
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ if (IsFlushable(heap, shared)) {
+ // This function's code looks flushable. But we have to postpone
+ // the decision until we see all functions that point to the same
+ // SharedFunctionInfo because some of them might be optimized.
+ // That would also make the non-optimized version of the code
+ // non-flushable, because it is required for bailing out from
+ // optimized code.
+ collector->code_flusher()->AddCandidate(shared);
+ // Treat the reference to the code object weakly.
+ VisitSharedFunctionInfoWeakCode(heap, object);
+ return;
+ }
+ }
+ VisitSharedFunctionInfoStrongCode(heap, object);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ JSFunction* function = JSFunction::cast(object);
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ if (IsFlushable(heap, function)) {
+ // This function's code looks flushable. But we have to postpone
+ // the decision until we see all functions that point to the same
+ // SharedFunctionInfo because some of them might be optimized.
+ // That would also make the non-optimized version of the code
+ // non-flushable, because it is required for bailing out from
+ // optimized code.
+ collector->code_flusher()->AddCandidate(function);
+ // Visit the shared function info immediately to avoid double-checking
+ // its flushability later. This is just an optimization because the
+ // shared function info would eventually be visited.
+ SharedFunctionInfo* shared = function->unchecked_shared();
+ if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
+ StaticVisitor::MarkObject(heap, shared->map());
+ VisitSharedFunctionInfoWeakCode(heap, shared);
+ }
+ // Treat the reference to the code object weakly.
+ VisitJSFunctionWeakCode(heap, object);
+ return;
+ } else {
+ // Visit all unoptimized code objects to prevent flushing them.
+ StaticVisitor::MarkObject(heap, function->shared()->code());
+ if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+ MarkInlinedFunctionsCode(heap, function->code());
+ }
+ }
+ }
+ VisitJSFunctionStrongCode(heap, object);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
+ Map* map, HeapObject* object) {
+ int last_property_offset =
+ JSRegExp::kSize + kPointerSize * map->inobject_properties();
+ StaticVisitor::VisitPointers(map->GetHeap(),
+ HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+ HeapObject::RawField(object, last_property_offset));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
+ Heap* heap, Map* map) {
+ // Make sure that the back pointer stored either in the map itself or
+ // inside its transitions array is marked. Skip recording the back
+ // pointer slot since map space is not compacted.
+ StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
+
+ // Treat pointers in the transitions array as weak and also mark that
+ // array to prevent visiting it later. Skip recording the transition
+ // array slot, since it will be implicitly recorded when the pointer
+ // fields of this map are visited.
+ TransitionArray* transitions = map->unchecked_transition_array();
+ if (transitions->IsTransitionArray()) {
+ MarkTransitionArray(heap, transitions);
+ } else {
+ // Already marked by marking map->GetBackPointer() above.
+ ASSERT(transitions->IsMap() || transitions->IsUndefined());
+ }
+
+ // Mark the pointer fields of the Map. Since the transitions array has
+ // been marked already, it is fine that one of these fields contains a
+ // pointer to it.
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
+ Heap* heap, TransitionArray* transitions) {
+ if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
+
+ // Simple transitions have neither keys nor prototype transitions.
+ if (transitions->IsSimpleTransition()) return;
+
+ if (transitions->HasPrototypeTransitions()) {
+ // Mark the prototype transitions array but do not push it onto the
+ // marking stack; this makes references from it weak. Dead prototype
+ // transitions are cleaned up in ClearNonLiveTransitions.
+ Object** slot = transitions->GetPrototypeTransitionsSlot();
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ }
+
+ for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+ StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
+ }
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(
+ Heap* heap, Code* code) {
+ // For an optimized function we should retain both the non-optimized
+ // version of its code and the non-optimized versions of all inlined
+ // functions. This is required to support bailing out from inlined code.
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ FixedArray* literals = data->LiteralArray();
+ for (int i = 0, count = data->InlinedFunctionCount()->value();
+ i < count;
+ i++) {
+ JSFunction* inlined = JSFunction::cast(literals->get(i));
+ StaticVisitor::MarkObject(heap, inlined->shared()->code());
+ }
+}
+
+
+inline static bool IsValidNonBuiltinContext(Object* context) {
+ return context->IsContext() &&
+ !Context::cast(context)->global_object()->IsJSBuiltinsObject();
+}
+
+
+inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+ Object* undefined = heap->undefined_value();
+ return (info->script() != undefined) &&
+ (reinterpret_cast<Script*>(info->script())->source() != undefined);
+}
+
+
+template<typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+ Heap* heap, JSFunction* function) {
+ SharedFunctionInfo* shared_info = function->unchecked_shared();
+
+ // The code is either on the stack, in the compilation cache, or
+ // referenced by an optimized version of the function.
+ MarkBit code_mark = Marking::MarkBitFrom(function->code());
+ if (code_mark.Get()) {
+ if (!FLAG_age_code) {
+ if (!Marking::MarkBitFrom(shared_info).Get()) {
+ shared_info->set_code_age(0);
+ }
+ }
+ return false;
+ }
+
+ // The function must have a valid context and not be a builtin.
+ if (!IsValidNonBuiltinContext(function->unchecked_context())) {
+ return false;
+ }
+
+ // We do not (yet) flush code for optimized functions.
+ if (function->code() != shared_info->code()) {
+ return false;
+ }
+
+ // Check the age of the code; optimized code was already excluded above.
+ if (FLAG_age_code && !function->code()->IsOld()) {
+ return false;
+ }
+
+ return IsFlushable(heap, shared_info);
+}
+
+
+template<typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+ Heap* heap, SharedFunctionInfo* shared_info) {
+ // The code is either on the stack, in the compilation cache, or
+ // referenced by an optimized version of the function.
+ MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
+ if (code_mark.Get()) {
+ return false;
+ }
+
+ // The function must be compiled and have the source code available,
+ // so that it can be recompiled in case the function is needed again.
+ if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
+ return false;
+ }
+
+ // We never flush code for API functions.
+ Object* function_data = shared_info->function_data();
+ if (function_data->IsFunctionTemplateInfo()) {
+ return false;
+ }
+
+ // Only flush code for functions.
+ if (shared_info->code()->kind() != Code::FUNCTION) {
+ return false;
+ }
+
+ // Function must be lazy compilable.
+ if (!shared_info->allows_lazy_compilation()) {
+ return false;
+ }
+
+ // If this is a full script wrapped in a function, we do not flush the code.
+ if (shared_info->is_toplevel()) {
+ return false;
+ }
+
+ if (FLAG_age_code) {
+ return shared_info->code()->IsOld();
+ } else {
+ // How many collections a newly compiled code object will survive
+ // before being flushed.
+ static const int kCodeAgeThreshold = 5;
+
+ // Age this shared function info.
+ if (shared_info->code_age() < kCodeAgeThreshold) {
+ shared_info->set_code_age(shared_info->code_age() + 1);
+ return false;
+ }
+ return true;
+ }
+}
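A worked example of the fallback aging branch above: a candidate starts with
code_age == 0, so the first five collections each bump the counter (to 1
through 5) and return false; the sixth check finds code_age no longer below
kCodeAgeThreshold and returns true. A newly compiled code object therefore
survives five collections before it becomes flushable.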
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
+ Heap* heap, HeapObject* object) {
+ StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
+ Object** start_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kStartOffset);
+ Object** end_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
+ Heap* heap, HeapObject* object) {
+ StaticVisitor::BeforeVisitingSharedFunctionInfo(object);
+ Object** name_slot =
+ HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
+ StaticVisitor::VisitPointer(heap, name_slot);
+
+ // Skip visiting kCodeOffset as it is treated weakly here.
+ STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
+ SharedFunctionInfo::kCodeOffset);
+ STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+
+ Object** start_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+ Object** end_slot =
+ HeapObject::RawField(object,
+ SharedFunctionInfo::BodyDescriptor::kEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
+ Heap* heap, HeapObject* object) {
+ Object** start_slot =
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+ Object** end_slot =
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+ VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kPrototypeOrInitialMapOffset);
+
+ start_slot =
+ HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+ end_slot =
+ HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
+ Heap* heap, HeapObject* object) {
+ Object** start_slot =
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+ Object** end_slot =
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+ // Skip visiting kCodeEntryOffset as it is treated weakly here.
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kPrototypeOrInitialMapOffset);
+
+ start_slot =
+ HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+ end_slot =
+ HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+ StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
diff --git a/src/3rdparty/v8/src/objects-visiting.cc b/src/3rdparty/v8/src/objects-visiting.cc
index a2dc43e..6ae4d7c 100644
--- a/src/3rdparty/v8/src/objects-visiting.cc
+++ b/src/3rdparty/v8/src/objects-visiting.cc
@@ -45,7 +45,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
+ if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
return kVisitSeqAsciiString;
} else {
return kVisitSeqTwoByteString;
diff --git a/src/3rdparty/v8/src/objects-visiting.h b/src/3rdparty/v8/src/objects-visiting.h
index b476dfe..3937e25 100644
--- a/src/3rdparty/v8/src/objects-visiting.h
+++ b/src/3rdparty/v8/src/objects-visiting.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -46,71 +46,70 @@ namespace internal {
// Base class for all static visitors.
class StaticVisitorBase : public AllStatic {
public:
+#define VISITOR_ID_LIST(V) \
+ V(SeqAsciiString) \
+ V(SeqTwoByteString) \
+ V(ShortcutCandidate) \
+ V(ByteArray) \
+ V(FreeSpace) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(NativeContext) \
+ V(DataObject2) \
+ V(DataObject3) \
+ V(DataObject4) \
+ V(DataObject5) \
+ V(DataObject6) \
+ V(DataObject7) \
+ V(DataObject8) \
+ V(DataObject9) \
+ V(DataObjectGeneric) \
+ V(JSObject2) \
+ V(JSObject3) \
+ V(JSObject4) \
+ V(JSObject5) \
+ V(JSObject6) \
+ V(JSObject7) \
+ V(JSObject8) \
+ V(JSObject9) \
+ V(JSObjectGeneric) \
+ V(Struct2) \
+ V(Struct3) \
+ V(Struct4) \
+ V(Struct5) \
+ V(Struct6) \
+ V(Struct7) \
+ V(Struct8) \
+ V(Struct9) \
+ V(StructGeneric) \
+ V(ConsString) \
+ V(SlicedString) \
+ V(Oddball) \
+ V(Code) \
+ V(Map) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(JSFunction) \
+ V(JSWeakMap) \
+ V(JSRegExp)
+
+ // For data objects, JS objects, and structs we provide, in addition to a
+ // generic visitor that can visit an object of any size, visitors
+ // specialized by object size in words.
+ // Ids of the specialized visitors are declared in a linear order (without
+ // holes), starting from the id of the visitor specialized for 2-word
+ // objects (the base visitor id) and ending with the id of the generic
+ // visitor.
+ // GetVisitorIdForSize depends on this ordering to calculate the id of the
+ // specialized visitor from a given instance size, the base visitor id,
+ // and the generic visitor's id.
enum VisitorId {
- kVisitSeqAsciiString = 0,
- kVisitSeqTwoByteString,
- kVisitShortcutCandidate,
- kVisitByteArray,
- kVisitFreeSpace,
- kVisitFixedArray,
- kVisitFixedDoubleArray,
- kVisitGlobalContext,
-
- // For data objects, JS objects and structs along with generic visitor which
- // can visit object of any size we provide visitors specialized by
- // object size in words.
- // Ids of specialized visitors are declared in a linear order (without
- // holes) starting from the id of visitor specialized for 2 words objects
- // (base visitor id) and ending with the id of generic visitor.
- // Method GetVisitorIdForSize depends on this ordering to calculate visitor
- // id of specialized visitor from given instance size, base visitor id and
- // generic visitor's id.
-
- kVisitDataObject,
- kVisitDataObject2 = kVisitDataObject,
- kVisitDataObject3,
- kVisitDataObject4,
- kVisitDataObject5,
- kVisitDataObject6,
- kVisitDataObject7,
- kVisitDataObject8,
- kVisitDataObject9,
- kVisitDataObjectGeneric,
-
- kVisitJSObject,
- kVisitJSObject2 = kVisitJSObject,
- kVisitJSObject3,
- kVisitJSObject4,
- kVisitJSObject5,
- kVisitJSObject6,
- kVisitJSObject7,
- kVisitJSObject8,
- kVisitJSObject9,
- kVisitJSObjectGeneric,
-
- kVisitStruct,
- kVisitStruct2 = kVisitStruct,
- kVisitStruct3,
- kVisitStruct4,
- kVisitStruct5,
- kVisitStruct6,
- kVisitStruct7,
- kVisitStruct8,
- kVisitStruct9,
- kVisitStructGeneric,
-
- kVisitConsString,
- kVisitSlicedString,
- kVisitOddball,
- kVisitCode,
- kVisitMap,
- kVisitPropertyCell,
- kVisitSharedFunctionInfo,
- kVisitJSFunction,
- kVisitJSWeakMap,
- kVisitJSRegExp,
-
+#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
+ VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+#undef VISITOR_ID_ENUM_DECL
kVisitorIdCount,
+ kVisitDataObject = kVisitDataObject2,
+ kVisitJSObject = kVisitJSObject2,
+ kVisitStruct = kVisitStruct2,
kMinObjectSizeInWords = 2
};
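The ordering comment above defers to GetVisitorIdForSize, which is defined
elsewhere in this header; a sketch of the arithmetic that ordering implies
(an assumed shape, not part of this hunk):

    static VisitorId GetVisitorIdForSize(VisitorId base,
                                         VisitorId generic,
                                         int object_size) {
      ASSERT(base == kVisitDataObject ||
             base == kVisitJSObject ||
             base == kVisitStruct);
      // Ids for 2..9-word objects follow base contiguously, so the
      // specialization is a plain offset from base; anything larger falls
      // back to the generic visitor.
      const VisitorId specialization = static_cast<VisitorId>(
          base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
      return Min(specialization, generic);
    }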
@@ -361,7 +360,97 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
template<typename StaticVisitor>
VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
+ StaticNewSpaceVisitor<StaticVisitor>::table_;
+
+
+// Base class for visitors used to transitively mark the entire heap.
+// IterateBody returns nothing.
+// Certain types of objects might not be handled by this base class and
+// no visitor function is registered by the generic initialization. A
+// specialized visitor function needs to be provided by the inheriting
+// class itself for those cases.
+//
+// This class is intended to be used in the following way:
+//
+// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
+// ...
+// }
+//
+// This is an example of the Curiously Recurring Template Pattern.
+template<typename StaticVisitor>
+class StaticMarkingVisitor : public StaticVisitorBase {
+ public:
+ static void Initialize();
+
+ static inline void IterateBody(Map* map, HeapObject* obj) {
+ table_.GetVisitor(map)(map, obj);
+ }
+
+ static inline void VisitCodeEntry(Heap* heap, Address entry_address);
+ static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo);
+ static inline void VisitExternalReference(RelocInfo* rinfo) { }
+ static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
+
+ // TODO(mstarzinger): This should be made protected once refactoring is done.
+ // Mark non-optimized code for functions inlined into the given optimized
+ // code. This will prevent it from being flushed.
+ static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
+
+ protected:
+ static inline void VisitMap(Map* map, HeapObject* object);
+ static inline void VisitCode(Map* map, HeapObject* object);
+ static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object);
+ static inline void VisitJSFunction(Map* map, HeapObject* object);
+ static inline void VisitJSRegExp(Map* map, HeapObject* object);
+ static inline void VisitNativeContext(Map* map, HeapObject* object);
+
+ // Mark pointers in a Map and its TransitionArray together, possibly
+ // treating transitions or back pointers as weak.
+ static void MarkMapContents(Heap* heap, Map* map);
+ static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
+
+ // Code flushing support.
+ static inline bool IsFlushable(Heap* heap, JSFunction* function);
+ static inline bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info);
+
+ // Helpers used by code flushing support that visit pointer fields and treat
+ // references to code objects either strongly or weakly.
+ static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
+ static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+
+ class DataObjectVisitor {
+ public:
+ template<int size>
+ static inline void VisitSpecialized(Map* map, HeapObject* object) {
+ }
+
+ static inline void Visit(Map* map, HeapObject* object) {
+ }
+ };
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSObject::BodyDescriptor,
+ void> JSObjectVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ StructBodyDescriptor,
+ void> StructObjectVisitor;
+
+ typedef void (*Callback)(Map* map, HeapObject* object);
+
+ static VisitorDispatchTable<Callback> table_;
+};
+
+
+template<typename StaticVisitor>
+VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
+ StaticMarkingVisitor<StaticVisitor>::table_;
} } // namespace v8::internal
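A minimal sketch of the CRTP usage the class comment describes (SomeVisitor
and its MarkObject body are hypothetical; MarkObject is one of the hooks,
along with MarkObjectWithoutPush and VisitPointers, that the Visit* methods
above expect the derived class to supply):

    class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
     public:
      static void MarkObject(Heap* heap, HeapObject* object) {
        // A real marking visitor would set the object's mark bit here and
        // push it onto a marking deque so its fields get visited in turn.
      }
    };

    // One-time setup fills the dispatch table; afterwards IterateBody
    // dispatches through it by the map's visitor id:
    //   SomeVisitor::Initialize();
    //   SomeVisitor::IterateBody(object->map(), object);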
diff --git a/src/3rdparty/v8/src/objects.cc b/src/3rdparty/v8/src/objects.cc
index 7373384..0d3836b 100644
--- a/src/3rdparty/v8/src/objects.cc
+++ b/src/3rdparty/v8/src/objects.cc
@@ -56,11 +56,6 @@
namespace v8 {
namespace internal {
-void PrintElementsKind(FILE* out, ElementsKind kind) {
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- PrintF(out, "%s", accessor->name());
-}
-
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
@@ -74,13 +69,13 @@ MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
}
-MaybeObject* Object::ToObject(Context* global_context) {
+MaybeObject* Object::ToObject(Context* native_context) {
if (IsNumber()) {
- return CreateJSValue(global_context->number_function(), this);
+ return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
- return CreateJSValue(global_context->boolean_function(), this);
+ return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
- return CreateJSValue(global_context->string_function(), this);
+ return CreateJSValue(native_context->string_function(), this);
}
ASSERT(IsJSObject());
return this;
@@ -92,16 +87,16 @@ MaybeObject* Object::ToObject() {
return this;
} else if (IsNumber()) {
Isolate* isolate = Isolate::Current();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->number_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->boolean_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
- Context* global_context = isolate->context()->global_context();
- return CreateJSValue(global_context->string_function(), this);
+ Context* native_context = isolate->context()->native_context();
+ return CreateJSValue(native_context->string_function(), this);
}
// Throw a type error.
@@ -139,13 +134,16 @@ void Object::Lookup(String* name, LookupResult* result) {
if (IsJSReceiver()) {
holder = this;
} else {
- Context* global_context = Isolate::Current()->context()->global_context();
+ Context* native_context = Isolate::Current()->context()->native_context();
if (IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
+ holder = native_context->number_function()->instance_prototype();
} else if (IsString()) {
- holder = global_context->string_function()->instance_prototype();
+ holder = native_context->string_function()->instance_prototype();
} else if (IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
+ holder = native_context->boolean_function()->instance_prototype();
+ } else {
+ Isolate::Current()->PushStackTraceAndDie(
+ 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
@@ -183,8 +181,19 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
// api style callbacks.
if (structure->IsAccessorInfo()) {
AccessorInfo* data = AccessorInfo::cast(structure);
+ if (!data->IsCompatibleReceiver(receiver)) {
+ Handle<Object> name_handle(name);
+ Handle<Object> receiver_handle(receiver);
+ Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("incompatible_method_receiver",
+ HandleVector(args,
+ ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
Handle<String> key(name);
@@ -201,7 +210,9 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (result.IsEmpty()) {
return isolate->heap()->undefined_value();
}
- return *v8::Utils::OpenHandle(*result);
+ Object* return_value = *v8::Utils::OpenHandle(*result);
+ return_value->VerifyApiCallResultType();
+ return return_value;
}
// __defineGetter__ callback
@@ -253,13 +264,14 @@ MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
}
-MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
+MaybeObject* JSProxy::SetElementWithHandler(JSReceiver* receiver,
+ uint32_t index,
Object* value,
StrictModeFlag strict_mode) {
String* name;
MaybeObject* maybe = GetHeap()->Uint32ToString(index);
if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(name, value, NONE, strict_mode);
+ return SetPropertyWithHandler(receiver, name, value, NONE, strict_mode);
}
@@ -400,16 +412,16 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
} else {
result->holder()->LocalLookupRealNamedProperty(name, &r);
}
- if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
- }
- break;
+ if (!r.IsFound()) break;
+ return GetPropertyAttributeWithFailedAccessCheck(receiver,
+ &r,
+ name,
+ continue_search);
}
- default:
+ case HANDLER:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
}
}
@@ -475,10 +487,21 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name,
set_properties(StringDictionary::cast(dict));
return value;
}
- // Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- property_dictionary()->DetailsAt(entry).index());
+
+ PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ int enumeration_index;
+ // Preserve the enumeration index unless the property was deleted.
+ if (original_details.IsDeleted()) {
+ enumeration_index = property_dictionary()->NextEnumerationIndex();
+ property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ } else {
+ enumeration_index = original_details.dictionary_index();
+ ASSERT(enumeration_index > 0);
+ }
+
+ details = PropertyDetails(
+ details.attributes(), details.type(), enumeration_index);
+
if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
@@ -506,11 +529,12 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
// map change to invalidate any ICs that think they can load
// from the DontDelete cell without checking if it contains
// the hole value.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
- set_map(Map::cast(new_map));
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ ASSERT(new_map->is_dictionary_map());
+ set_map(new_map);
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
@@ -543,7 +567,7 @@ bool JSObject::IsDirty() {
// If the object is fully fast case and has the same map it was
// created with then no changes can have been made to it.
return map() != fun->initial_map()
- || !HasFastElements()
+ || !HasFastObjectElements()
|| !HasFastProperties();
}
@@ -627,15 +651,12 @@ MaybeObject* Object::GetProperty(Object* receiver,
receiver, result->GetCallbackObject(), name);
case HANDLER:
return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR: {
- JSObject* recvr = JSObject::cast(receiver);
+ case INTERCEPTOR:
return result->holder()->GetPropertyWithInterceptor(
- recvr, name, attributes);
- }
- case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
+ receiver, name, attributes);
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
UNREACHABLE();
@@ -656,13 +677,13 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
holder = holder->GetPrototype()) {
if (!holder->IsJSObject()) {
Isolate* isolate = heap->isolate();
- Context* global_context = isolate->context()->global_context();
+ Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
+ holder = native_context->number_function()->instance_prototype();
} else if (holder->IsString()) {
- holder = global_context->string_function()->instance_prototype();
+ holder = native_context->string_function()->instance_prototype();
} else if (holder->IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
+ holder = native_context->boolean_function()->instance_prototype();
} else if (holder->IsJSProxy()) {
return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
} else {
@@ -704,7 +725,7 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
Object* Object::GetPrototype() {
if (IsSmi()) {
Heap* heap = Isolate::Current()->heap();
- Context* context = heap->isolate()->context()->global_context();
+ Context* context = heap->isolate()->context()->native_context();
return context->number_function()->instance_prototype();
}
@@ -716,7 +737,7 @@ Object* Object::GetPrototype() {
return heap_object->map()->prototype();
}
Heap* heap = heap_object->GetHeap();
- Context* context = heap->isolate()->context()->global_context();
+ Context* context = heap->isolate()->context()->native_context();
if (heap_object->IsHeapNumber()) {
return context->number_function()->instance_prototype();
@@ -758,7 +779,6 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
bool Object::SameValue(Object* other) {
if (other == this) return true;
- if (!IsHeapObject() || !other->IsHeapObject()) return false;
// The object is either a number, a string, an odd-ball,
// a real JS object, or a Harmony proxy.
@@ -795,7 +815,7 @@ void Object::ShortPrint(StringStream* accumulator) {
void Smi::SmiPrint(FILE* out) {
- PrintF(out, "%d", value());
+ FPrintF(out, "%d", value());
}
@@ -810,7 +830,7 @@ void Failure::FailurePrint(StringStream* accumulator) {
void Failure::FailurePrint(FILE* out) {
- PrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
+ FPrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
}
@@ -1067,7 +1087,9 @@ void String::StringShortPrint(StringStream* accumulator) {
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->Number();
+ double length = JSArray::cast(this)->length()->IsUndefined()
+ ? 0
+ : JSArray::cast(this)->length()->Number();
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
@@ -1146,19 +1168,19 @@ void JSObject::PrintElementsTransition(
FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
ElementsKind to_kind, FixedArrayBase* to_elements) {
if (from_kind != to_kind) {
- PrintF(file, "elements transition [");
+ FPrintF(file, "elements transition [");
PrintElementsKind(file, from_kind);
- PrintF(file, " -> ");
+ FPrintF(file, " -> ");
PrintElementsKind(file, to_kind);
- PrintF(file, "] in ");
+ FPrintF(file, "] in ");
JavaScriptFrame::PrintTop(file, false, true);
- PrintF(file, " for ");
+ FPrintF(file, " for ");
ShortPrint(file);
- PrintF(file, " from ");
+ FPrintF(file, " from ");
from_elements->ShortPrint(file);
- PrintF(file, " to ");
+ FPrintF(file, " to ");
to_elements->ShortPrint(file);
- PrintF(file, "\n");
+ FPrintF(file, "\n");
}
}
@@ -1318,7 +1340,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
SlicedString::BodyDescriptor::IterateBody(this, v);
break;
case kExternalStringTag:
- if ((type & kStringEncodingMask) == kAsciiStringTag) {
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
reinterpret_cast<ExternalAsciiString*>(this)->
ExternalAsciiStringIterateBody(v);
} else {
@@ -1392,8 +1414,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
- shared->SharedFunctionInfoIterateBody(v);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
break;
}
@@ -1433,7 +1454,7 @@ Object* HeapNumber::HeapNumberToBoolean() {
void HeapNumber::HeapNumberPrint(FILE* out) {
- PrintF(out, "%.16g", Number());
+ FPrintF(out, "%.16g", Number());
}
@@ -1481,20 +1502,19 @@ String* JSReceiver::constructor_name() {
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
String* name,
- Object* value) {
- int index = new_map->PropertyIndexFor(name);
+ Object* value,
+ int field_index) {
if (map()->unused_property_fields() == 0) {
- ASSERT(map()->unused_property_fields() == 0);
int new_unused = new_map->unused_property_fields();
- Object* values;
+ FixedArray* values;
{ MaybeObject* maybe_values =
properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->ToObject(&values)) return maybe_values;
+ if (!maybe_values->To(&values)) return maybe_values;
}
- set_properties(FixedArray::cast(values));
+ set_properties(values);
}
set_map(new_map);
- return FastPropertyAtPut(index, value);
+ return FastPropertyAtPut(field_index, value);
}
@@ -1516,96 +1536,66 @@ static bool IsIdentifier(UnicodeCache* cache,
MaybeObject* JSObject::AddFastProperty(String* name,
Object* value,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode) {
ASSERT(!IsJSGlobalProxy());
+ ASSERT(DescriptorArray::kNotFound ==
+ map()->instance_descriptors()->Search(
+ name, map()->NumberOfOwnDescriptors()));
// Normalize the object if the name is an actual string (not the
// hidden symbols) and is not a real identifier.
+ // Normalize the object if it will have too many fast properties.
Isolate* isolate = GetHeap()->isolate();
StringInputBuffer buffer(name);
- if (!IsIdentifier(isolate->unicode_cache(), &buffer)
- && name != isolate->heap()->hidden_symbol()) {
+ if ((!IsIdentifier(isolate->unicode_cache(), &buffer)
+ && name != isolate->heap()->hidden_symbol()) ||
+ (map()->unused_property_fields() == 0 &&
+ TooManyFastProperties(properties()->length(), store_mode))) {
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj =
+ NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+
return AddSlowProperty(name, value, attributes);
}
- DescriptorArray* old_descriptors = map()->instance_descriptors();
// Compute the new index for new field.
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- old_descriptors->CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Only allow map transition if the object isn't the global object and there
- // is not a transition for the name, or there's a transition for the name but
- // it's unrelated to properties.
- int descriptor_index = old_descriptors->Search(name);
-
- // Element transitions are stored in the descriptor for property "", which is
- // not a identifier and should have forced a switch to slow properties above.
- ASSERT(descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) != ELEMENTS_TRANSITION);
- bool can_insert_transition = descriptor_index == DescriptorArray::kNotFound ||
- old_descriptors->GetType(descriptor_index) == ELEMENTS_TRANSITION;
- bool allow_map_transition =
- can_insert_transition &&
- (isolate->context()->global_context()->object_function()->map() != map());
+ FieldDescriptor new_field(name, index, attributes, 0);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
map()->unused_property_fields() == 0);
- // Allocate a new map for the object.
- Object* r;
- { MaybeObject* maybe_r = map()->CopyDropDescriptors();
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- Map* new_map = Map::cast(r);
- if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
- Object* r;
- { MaybeObject* maybe_r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
- if (!maybe_r->ToObject(&r)) return maybe_r;
- }
- old_descriptors = DescriptorArray::cast(r);
- }
+
+ FixedArray* values = NULL;
if (map()->unused_property_fields() == 0) {
- if (properties()->length() > MaxFastProperties()) {
- Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return AddSlowProperty(name, value, attributes);
- }
// Make room for the new value
- Object* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->ToObject(&values)) return maybe_values;
- }
- set_properties(FixedArray::cast(values));
+ MaybeObject* maybe_values =
+ properties()->CopySize(properties()->length() + kFieldsAdded);
+ if (!maybe_values->To(&values)) return maybe_values;
+ }
+
+ // Only allow map transition if the object isn't the global object.
+ TransitionFlag flag = isolate->empty_object_map() != map()
+ ? INSERT_TRANSITION
+ : OMIT_TRANSITION;
+
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ if (map()->unused_property_fields() == 0) {
+ ASSERT(values != NULL);
+ set_properties(values);
new_map->set_unused_property_fields(kFieldsAdded - 1);
} else {
new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
}
- // We have now allocated all the necessary objects.
- // All the changes can be applied at once, so they are atomic.
- map()->set_instance_descriptors(old_descriptors);
- new_map->SetBackPointer(map());
- new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+
set_map(new_map);
return FastPropertyAtPut(index, value);
}
@@ -1616,57 +1606,26 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
JSFunction* function,
PropertyAttributes attributes) {
// Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors =
- map()->instance_descriptors()->CopyInsert(&d, REMOVE_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- }
-
- // Allocate a new map for the object.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
-
- DescriptorArray* descriptors = DescriptorArray::cast(new_descriptors);
- Map::cast(new_map)->set_instance_descriptors(descriptors);
- Map* old_map = map();
- set_map(Map::cast(new_map));
+ ConstantFunctionDescriptor d(name, function, attributes, 0);
- // If the old map is the global object map (from new Object()),
- // then transitions are not added to it, so we are done.
Heap* heap = GetHeap();
- if (old_map == heap->isolate()->context()->global_context()->
- object_function()->map()) {
- return function;
- }
-
- // Do not add CONSTANT_TRANSITIONS to global objects
- if (IsGlobalObject()) {
- return function;
- }
+ TransitionFlag flag =
+ // Do not add transitions to the empty object map (map of "new Object()"),
+ // nor to global objects.
+ (map() == heap->isolate()->empty_object_map() || IsGlobalObject() ||
+ // Don't add transitions to special properties with non-trivial
+ // attributes.
+ // TODO(verwaest): Once we support attribute changes, these transitions
+ // should be kept as well.
+ attributes != NONE)
+ ? OMIT_TRANSITION
+ : INSERT_TRANSITION;
- // Add a CONSTANT_TRANSITION descriptor to the old map,
- // so future assignments to this property on other objects
- // of the same type will create a normal field, not a constant function.
- // Don't do this for special properties, with non-trival attributes.
- if (attributes != NONE) {
- return function;
- }
- ConstTransitionDescriptor mark(name, Map::cast(new_map));
- { MaybeObject* maybe_new_descriptors =
- old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- // We have accomplished the main goal, so return success.
- return function;
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- Map::cast(new_map)->SetBackPointer(old_map);
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ set_map(new_map);
return function;
}
@@ -1712,11 +1671,15 @@ MaybeObject* JSObject::AddSlowProperty(String* name,
MaybeObject* JSObject::AddProperty(String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
- if (!map_of_this->is_extensible()) {
+ MaybeObject* result;
+ if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
+ !map_of_this->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
@@ -1726,28 +1689,70 @@ MaybeObject* JSObject::AddProperty(String* name,
HandleVector(args, 1)));
}
}
+
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map_of_this->instance_descriptors()->number_of_descriptors() <
+ if (map_of_this->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
if (value->IsJSFunction()) {
- return AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
+ result = AddConstantFunctionProperty(name,
+ JSFunction::cast(value),
+ attributes);
} else {
- return AddFastProperty(name, value, attributes);
+ result = AddFastProperty(name, value, attributes, store_mode);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe->To(&obj)) return maybe;
+ result = AddSlowProperty(name, value, attributes);
}
+ } else {
+ result = AddSlowProperty(name, value, attributes);
+ }
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ EnqueueChangeRecord(handle(this), "new", handle(name),
+ handle(heap->the_hole_value()));
}
- return AddSlowProperty(name, value, attributes);
+
+ return *hresult;
+}
+
+
+void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
+ const char* type_str,
+ Handle<String> name,
+ Handle<Object> old_value) {
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope;
+ Handle<String> type = isolate->factory()->LookupAsciiSymbol(type_str);
+ Handle<Object> args[] = { type, object, name, old_value };
+ bool threw;
+ Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
+ Handle<Object>(isolate->heap()->undefined_value()),
+ old_value->IsTheHole() ? 3 : 4, args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
+void JSObject::DeliverChangeRecords(Isolate* isolate) {
+ ASSERT(isolate->observer_delivery_pending());
+ bool threw = false;
+ Execution::Call(
+ isolate->observers_deliver_changes(),
+ isolate->factory()->undefined_value(),
+ 0,
+ NULL,
+ &threw);
+ ASSERT(!threw);
+ isolate->set_observer_delivery_pending(false);
}
@@ -1755,25 +1760,25 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ ExtensibilityCheck extensibility_check) {
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
+ if (!result.IsFound()) map()->LookupTransition(this, name, &result);
if (result.IsFound()) {
- // An existing property, a map transition or a null descriptor was
- // found. Use set property to handle all these cases.
+ // An existing property or a map transition was found. Use set property to
+ // handle all these cases.
return SetProperty(&result, name, value, attributes, strict_mode);
}
- bool found = false;
+ bool done = false;
MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
+ result_object =
+ SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ if (done) return result_object;
// Add a new real property.
- return AddProperty(name, value, attributes, strict_mode);
+ return AddProperty(name, value, attributes, strict_mode,
+ MAY_BE_STORE_FROM_KEYED, extensibility_check);
}
@@ -1785,8 +1790,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
- ASSERT(!dictionary->ContainsTransition(old_index));
- new_enumeration_index = dictionary->DetailsAt(old_index).index();
+ new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
@@ -1794,39 +1798,52 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
}
-MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
+MaybeObject* JSObject::ConvertTransitionToMapTransition(
+ int transition_index,
String* name,
Object* new_value,
PropertyAttributes attributes) {
Map* old_map = map();
+ Map* old_target = old_map->GetTransition(transition_index);
Object* result;
- { MaybeObject* maybe_result =
- ConvertDescriptorToField(name, new_value, attributes);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // If we get to this point we have succeeded - do not return failure
- // after this point. Later stuff is optional.
- if (!HasFastProperties()) {
- return result;
- }
- // Do not add transitions to the map of "new Object()".
- if (map() == GetIsolate()->context()->global_context()->
- object_function()->map()) {
- return result;
- }
- MapTransitionDescriptor transition(name,
- map(),
- attributes);
- Object* new_descriptors;
- { MaybeObject* maybe_new_descriptors = old_map->instance_descriptors()->
- CopyInsert(&transition, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return result; // Yes, return _result_.
- }
- }
- old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- map()->SetBackPointer(old_map);
+ MaybeObject* maybe_result =
+ ConvertDescriptorToField(name, new_value, attributes);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ if (!HasFastProperties()) return result;
+
+ // This method should only be used to convert existing transitions. Objects
+ // with the map of "new Object()" cannot have transitions in the first place.
+ Map* new_map = map();
+ ASSERT(new_map != GetIsolate()->empty_object_map());
+
+ // TODO(verwaest): From here on we lose existing map transitions, causing
+ // invalid back pointers. This will change once we can store multiple
+ // transitions with the same key.
+
+ bool owned_descriptors = old_map->owns_descriptors();
+ if (owned_descriptors ||
+ old_target->instance_descriptors() == old_map->instance_descriptors()) {
+ // Since the conversion above generated a new fast map with an additional
+ // property that can be shared as well, install the new descriptor array
+ // pointer along the entire chain of smaller maps.
+ Map* map;
+ DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ for (Object* current = old_map;
+ !current->IsUndefined();
+ current = map->GetBackPointer()) {
+ map = Map::cast(current);
+ if (map->instance_descriptors() != old_descriptors) break;
+ map->SetEnumLength(Map::kInvalidEnumCache);
+ map->set_instance_descriptors(new_descriptors);
+ }
+ old_map->set_owns_descriptors(false);
+ }
+
+ old_map->SetTransition(transition_index, new_map);
+ new_map->SetBackPointer(old_map);
return result;
}
@@ -1835,59 +1852,38 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name,
Object* new_value,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
- properties()->length() > MaxFastProperties()) {
+ TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
return ReplaceSlowProperty(name, new_value, attributes);
}
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes);
- // Make a new DescriptorArray replacing an entry with FieldDescriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked = map()->instance_descriptors()->
- CopyInsert(&new_field, REMOVE_TRANSITIONS);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* new_descriptors =
- DescriptorArray::cast(descriptors_unchecked);
+ FieldDescriptor new_field(name, index, attributes, 0);
// Make a new map for the object.
- Object* new_map_unchecked;
- { MaybeObject* maybe_new_map_unchecked = map()->CopyDropDescriptors();
- if (!maybe_new_map_unchecked->ToObject(&new_map_unchecked)) {
- return maybe_new_map_unchecked;
- }
- }
- Map* new_map = Map::cast(new_map_unchecked);
- new_map->set_instance_descriptors(new_descriptors);
+ Map* new_map;
+ MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field,
+ OMIT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
// Make new properties array if necessary.
- FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
+ FixedArray* new_properties = NULL;
int new_unused_property_fields = map()->unused_property_fields() - 1;
if (map()->unused_property_fields() == 0) {
new_unused_property_fields = kFieldsAdded - 1;
- Object* new_properties_object;
- { MaybeObject* maybe_new_properties_object =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties_object->ToObject(&new_properties_object)) {
- return maybe_new_properties_object;
- }
- }
- new_properties = FixedArray::cast(new_properties_object);
+ MaybeObject* maybe_new_properties =
+ properties()->CopySize(properties()->length() + kFieldsAdded);
+ if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
}
// Update pointers to commit changes.
// Object points to the new map.
new_map->set_unused_property_fields(new_unused_property_fields);
set_map(new_map);
- if (new_properties) {
- set_properties(FixedArray::cast(new_properties));
+ if (new_properties != NULL) {
+ set_properties(new_properties);
}
return FastPropertyAtPut(index, new_value);
}
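// ConvertDescriptorToField grows the out-of-object property backing store in
// chunks: once the map has no unused fields left, the properties FixedArray
// is copied kFieldsAdded slots larger and the new map advertises
// kFieldsAdded - 1 free slots. The bookkeeping in isolation (kFieldsAdded is
// assumed to be 3 here, standing in for JSObject::kFieldsAdded):

#include <cstdio>

const int kFieldsAdded = 3;

int main() {
  int backing = 4;                  // current properties array length
  int unused = 0;                   // map()->unused_property_fields()
  int new_unused = unused - 1;
  if (unused == 0) {                // grow path taken in the diff above
    new_unused = kFieldsAdded - 1;
    backing += kFieldsAdded;        // properties()->CopySize(len + kFieldsAdded)
  }
  std::printf("backing=%d unused=%d\n", backing, new_unused);  // 7, 2
  return 0;
}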
@@ -1930,7 +1926,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
this_handle->SetPropertyPostInterceptor(*name_handle,
*value_handle,
attributes,
- strict_mode);
+ strict_mode,
+ PERFORM_EXTENSIBILITY_CHECK);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
@@ -1944,7 +1941,7 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
bool skip_fallback_interceptor) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetProperty(*key, *value, attributes, strict_mode,
- skip_fallback_interceptor),
+ MAY_BE_STORE_FROM_KEYED, skip_fallback_interceptor),
Object);
}
@@ -1953,10 +1950,14 @@ MaybeObject* JSReceiver::SetProperty(String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
bool skip_fallback_interceptor) {
LookupResult result(GetIsolate());
LocalLookup(name, &result, skip_fallback_interceptor);
- return SetProperty(&result, name, value, attributes, strict_mode);
+ if (!result.IsFound()) {
+ map()->LookupTransition(JSObject::cast(this), name, &result);
+ }
+ return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
}
@@ -1989,6 +1990,16 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsAccessorInfo()) {
// api style callbacks
AccessorInfo* data = AccessorInfo::cast(structure);
+ if (!data->IsCompatibleReceiver(this)) {
+ Handle<Object> name_handle(name);
+ Handle<Object> receiver_handle(this);
+ Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("incompatible_method_receiver",
+ HandleVector(args,
+ ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
@@ -2054,26 +2065,6 @@ MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
}
-void JSObject::LookupCallbackSetterInPrototypes(String* name,
- LookupResult* result) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype()) {
- if (pt->IsJSProxy()) {
- return result->HandlerResult(JSProxy::cast(pt));
- }
- JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
- // Found non-callback or read-only callback, stop looking.
- break;
- }
- }
- result->NotFound();
-}
-
-
MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
uint32_t index,
Object* value,
@@ -2090,8 +2081,8 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
*found = true; // Force abort
return maybe;
}
- return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
- name, value, NONE, strict_mode, found);
+ return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
+ this, name, value, NONE, strict_mode, found);
}
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
@@ -2115,76 +2106,167 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+MaybeObject* JSObject::SetPropertyViaPrototypes(
String* name,
Object* value,
PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ bool* done) {
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
+
+ *done = false;
// We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result(heap->isolate());
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsFound()) {
- *found = true;
- if (accessor_result.type() == CALLBACKS) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder(),
- strict_mode);
- } else if (accessor_result.type() == HANDLER) {
- // There is a proxy in the prototype chain. Invoke its
- // getPropertyDescriptor trap.
- bool found = false;
- // SetPropertyWithHandlerIfDefiningSetter can cause GC,
- // make sure to use the handlified references after calling
- // the function.
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> hvalue(value);
- MaybeObject* result =
- accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
- name, value, attributes, strict_mode, &found);
- if (found) return result;
- // The proxy does not define the property as an accessor.
- // Consequently, it has no effect on setting the receiver.
- return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+ // accessor that wants to handle the property, or whether the property is
+ // read-only on the prototype chain.
+ LookupResult result(isolate);
+ LookupRealNamedPropertyInPrototypes(name, &result);
+ if (result.IsFound()) {
+ switch (result.type()) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ *done = result.IsReadOnly();
+ break;
+ case INTERCEPTOR: {
+ PropertyAttributes attr =
+ result.holder()->GetPropertyAttributeWithInterceptor(
+ this, name, true);
+ *done = !!(attr & READ_ONLY);
+ break;
+ }
+ case CALLBACKS: {
+ if (!FLAG_es5_readonly && result.IsReadOnly()) break;
+ *done = true;
+ return SetPropertyWithCallback(result.GetCallbackObject(),
+ name, value, result.holder(), strict_mode);
+ }
+ case HANDLER: {
+ return result.proxy()->SetPropertyViaPrototypesWithHandler(
+ this, name, value, attributes, strict_mode, done);
+ }
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
}
}
- *found = false;
+
+ // If we get here with *done true, we have encountered a read-only property.
+ if (!FLAG_es5_readonly) *done = false;
+ if (*done) {
+ if (strict_mode == kNonStrictMode) return value;
+ Handle<Object> args[] = { Handle<Object>(name), Handle<Object>(this)};
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ }
return heap->the_hole_value();
}
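// SetPropertyViaPrototypes (ex SetPropertyWithCallbackSetterInPrototypes) now
// also stops a store when a prototype carries a read-only property, gated by
// --es5-readonly. For the interceptor case the attribute bitmask collapses
// to a single bool via `!!(attr & READ_ONLY)`. A minimal model of that mask
// test, with values matching the public v8::PropertyAttribute constants:

#include <cstdio>

enum PropertyAttributes {
  NONE = 0,
  READ_ONLY = 1 << 0,
  DONT_ENUM = 1 << 1,
  DONT_DELETE = 1 << 2
};

int main() {
  int attr = READ_ONLY | DONT_ENUM;
  bool done = !!(attr & READ_ONLY);  // true: the prototype blocks the store
  std::printf("done=%d\n", done);
  return 0;
}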
-void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
- DescriptorArray* descriptors = map()->instance_descriptors();
- int number = descriptors->SearchWithCache(name);
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(this, descriptors->GetDetails(number), number);
- } else {
- result->NotFound();
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ zap++; // Header of filler must be at least one word so skip that.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
}
}
-void Map::LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result) {
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache =
- GetHeap()->isolate()->descriptor_lookup_cache();
- int number = cache->Lookup(descriptors, name);
- if (number == DescriptorLookupCache::kAbsent) {
- number = descriptors->Search(name);
- cache->Update(descriptors, name, number);
- }
- if (number != DescriptorArray::kNotFound) {
- result->DescriptorResult(holder, descriptors->GetDetails(number), number);
- } else {
- result->NotFound();
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ ASSERT(!HEAP->lo_space()->Contains(elms));
+
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ ZapEndOfFixedArray(new_end, to_trim);
}
+
+ int size_delta = to_trim * kPointerSize;
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode, which iterates through the heap), but to play it safe
+  // we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+}
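// RightTrimFixedArray shrinks an array in place: compute the new end address
// from the surviving length, zap the tail (outside GC, or whenever zapping
// is enabled), install a filler so the heap stays iterable, and fix up the
// live byte counts for incremental marking. The address arithmetic, as a
// sketch with assumed 8-byte pointers and a hypothetical 16-byte array
// header (the real FixedArray::SizeFor depends on the build):

#include <cstdio>

const int kPointerSize = 8;
const int kHeaderSize = 16;  // stand-in for FixedArray's real header size

static long SizeFor(int length) { return kHeaderSize + length * kPointerSize; }

int main() {
  long base = 4096;           // pretend start address of the array
  int len = 10, to_trim = 4;
  long new_end = base + SizeFor(len - to_trim);
  int size_delta = to_trim * kPointerSize;
  // The filler written at new_end covers exactly the trimmed tail.
  std::printf("new_end=%ld filler=%d bytes\n", new_end, size_delta);
  return 0;
}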
+
+
+void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ if (slack <= descriptors->NumberOfSlackDescriptors()) return;
+ int number_of_descriptors = descriptors->number_of_descriptors();
+ Isolate* isolate = map->GetIsolate();
+ Handle<DescriptorArray> new_descriptors =
+ isolate->factory()->NewDescriptorArray(number_of_descriptors, slack);
+ DescriptorArray::WhitenessWitness witness(*new_descriptors);
+
+ for (int i = 0; i < number_of_descriptors; ++i) {
+ new_descriptors->CopyFrom(i, *descriptors, i, witness);
+ }
+
+ map->set_instance_descriptors(*new_descriptors);
+}
+
+
+void Map::AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ Isolate* isolate = map->GetIsolate();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ NeanderArray callbacks(descriptors);
+ int nof_callbacks = callbacks.length();
+
+ ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
+
+ // Ensure the keys are symbols before writing them into the instance
+ // descriptor. Since it may cause a GC, it has to be done before we
+ // temporarily put the heap in an invalid state while appending descriptors.
+ for (int i = 0; i < nof_callbacks; ++i) {
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
+ Handle<String> key =
+ isolate->factory()->SymbolFromString(
+ Handle<String>(String::cast(entry->name())));
+ entry->set_name(*key);
+ }
+
+ int nof = map->NumberOfOwnDescriptors();
+
+ // Fill in new callback descriptors. Process the callbacks from
+ // back to front so that the last callback with a given name takes
+ // precedence over previously added callbacks with that name.
+ for (int i = nof_callbacks - 1; i >= 0; i--) {
+ AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
+ String* key = String::cast(entry->name());
+ // Check if a descriptor with this name already exists before writing.
+ if (array->Search(key, nof) == DescriptorArray::kNotFound) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ array->Append(&desc);
+ nof += 1;
+ }
+ }
+
+ map->SetNumberOfOwnDescriptors(nof);
}
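// AppendCallbackDescriptors resolves duplicate names by iterating back to
// front: the most recently registered callback for a name is appended first,
// and earlier ones are skipped by the Search() check. The same precedence
// rule in isolation, with std::vector standing in for the NeanderArray and
// DescriptorArray:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> callbacks = {"x", "y", "x"};  // "x" added twice
  std::vector<std::string> descriptors;
  for (int i = static_cast<int>(callbacks.size()) - 1; i >= 0; --i) {
    bool present = false;                      // array->Search(key, nof)
    for (const std::string& d : descriptors) present |= (d == callbacks[i]);
    if (!present) descriptors.push_back(callbacks[i]);  // array->Append(&desc)
  }
  // Prints the later "x", then "y"; the earlier "x" never lands.
  for (const std::string& d : descriptors) std::printf("%s\n", d.c_str());
  return 0;
}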
@@ -2205,217 +2287,88 @@ static Handle<T> MaybeNull(T* p) {
Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
- ElementsKind elms_kind = elements_kind();
- if (elms_kind == FAST_DOUBLE_ELEMENTS) {
- bool dummy = true;
- Handle<Map> fast_map =
- MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
- if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
- return fast_map;
- }
- return Handle<Map>::null();
- }
- if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
- bool dummy = true;
- Handle<Map> double_map =
- MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
- // In the current implementation, if the DOUBLE map doesn't exist, the
- // FAST map can't exist either.
- if (double_map.is_null()) return Handle<Map>::null();
- Handle<Map> fast_map =
- MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
- &dummy));
- if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
- return fast_map;
- }
- if (ContainsMap(candidates, double_map)) return double_map;
- }
- return Handle<Map>::null();
-}
-
-static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
- ElementsKind elements_kind) {
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- if (map->elements_kind() == elements_kind) {
- return map;
- }
- return NULL;
- }
-
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Object* current = map_array->get(i);
- // Skip undefined slots, they are sentinels for reclaimed maps.
- if (!current->IsUndefined()) {
- Map* current_map = Map::cast(map_array->get(i));
- if (current_map->elements_kind() == elements_kind) {
- return current_map;
+ ElementsKind kind = elements_kind();
+ Handle<Map> transitioned_map = Handle<Map>::null();
+ Handle<Map> current_map(this);
+ bool packed = IsFastPackedElementsKind(kind);
+ if (IsTransitionableFastElementsKind(kind)) {
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, false);
+ Handle<Map> maybe_transitioned_map =
+ MaybeNull(current_map->LookupElementsTransitionMap(kind));
+ if (maybe_transitioned_map.is_null()) break;
+ if (ContainsMap(candidates, maybe_transitioned_map) &&
+ (packed || !IsFastPackedElementsKind(kind))) {
+ transitioned_map = maybe_transitioned_map;
+ if (!IsFastPackedElementsKind(kind)) packed = false;
}
+ current_map = maybe_transitioned_map;
}
}
-
- return NULL;
+ return transitioned_map;
}
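// The rewritten FindTransitionedMap walks forward through increasingly
// general fast kinds and remembers the last acceptable candidate, with one
// twist: a packed receiver may move to a holey map, but once a holey kind is
// chosen it never "re-packs". The selection rule in isolation (kinds are
// plain ints here and a bool stands in for IsFastPackedElementsKind):

#include <cstdio>

struct Candidate { int kind; bool is_packed; bool map_exists; };

int main() {
  bool packed = true;       // receiver currently has packed elements
  int transitioned = -1;    // no transitioned map found yet
  Candidate chain[] = {{1, false, true}, {2, true, true}, {3, false, true}};
  for (const Candidate& c : chain) {
    if (!c.map_exists) break;        // LookupElementsTransitionMap was null
    if (packed || !c.is_packed) {    // never accept packed after going holey
      transitioned = c.kind;
      if (!c.is_packed) packed = false;
    }
  }
  std::printf("chose kind %d\n", transitioned);  // 3; packed kind 2 skipped
  return 0;
}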
-static MaybeObject* AddElementsTransitionMapToDescriptor(
- Object* descriptor_contents,
- Map* new_map) {
- // Nothing was in the descriptor for an ELEMENTS_TRANSITION,
- // simply add the map.
- if (descriptor_contents == NULL) {
- return new_map;
- }
-
- // There was already a map in the descriptor, create a 2-element FixedArray
- // to contain the existing map plus the new one.
- FixedArray* new_array;
- Heap* heap = new_map->GetHeap();
- if (descriptor_contents->IsMap()) {
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
- }
- new_array->set(0, descriptor_contents);
- new_array->set(1, new_map);
- return new_array;
- }
+static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
+ Map* current_map = map;
+ int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
+ int to_index = IsFastElementsKind(to_kind)
+ ? GetSequenceIndexFromFastElementsKind(to_kind)
+ : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- // The descriptor already contained a list of maps for different ElementKinds
- // of ELEMENTS_TRANSITION, first check the existing array for an undefined
- // slot, and if that's not available, create a FixedArray to hold the existing
- // maps plus the new one and fill it in.
- FixedArray* array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < array->length(); ++i) {
- if (array->get(i)->IsUndefined()) {
- array->set(i, new_map);
- return array;
- }
- }
+ ASSERT(index <= to_index);
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(array->length() + 1, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
+ for (; index < to_index; ++index) {
+ if (!current_map->HasElementsTransition()) return current_map;
+ current_map = current_map->elements_transition_map();
}
- int i = 0;
- while (i < array->length()) {
- new_array->set(i, array->get(i));
- ++i;
+ if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
+ Map* next_map = current_map->elements_transition_map();
+ if (next_map->elements_kind() == to_kind) return next_map;
}
- new_array->set(i, new_map);
- return new_array;
+ ASSERT(IsFastElementsKind(to_kind)
+ ? current_map->elements_kind() == to_kind
+ : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
+ return current_map;
}
-String* Map::elements_transition_sentinel_name() {
- return GetHeap()->empty_symbol();
+Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
+ Map* to_map = FindClosestElementsTransition(this, to_kind);
+ if (to_map->elements_kind() == to_kind) return to_map;
+ return NULL;
}
-Object* Map::GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transition) {
- // Get the cached index for the descriptors lookup, or find and cache it.
- DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
- int index = cache->Lookup(descriptors, sentinel_name);
- if (index == DescriptorLookupCache::kAbsent) {
- index = descriptors->Search(sentinel_name);
- cache->Update(descriptors, sentinel_name, index);
- }
- // If the transition already exists, return its descriptor.
- if (index != DescriptorArray::kNotFound) {
- PropertyDetails details = descriptors->GetDetails(index);
- if (details.type() == ELEMENTS_TRANSITION) {
- return descriptors->GetValue(index);
- } else {
- if (safe_to_add_transition != NULL) {
- *safe_to_add_transition = false;
- }
- }
- }
- return NULL;
-}
+static MaybeObject* AddMissingElementsTransitions(Map* map,
+ ElementsKind to_kind) {
+ ASSERT(IsFastElementsKind(map->elements_kind()));
+ int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
+ int to_index = IsFastElementsKind(to_kind)
+ ? GetSequenceIndexFromFastElementsKind(to_kind)
+ : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ ASSERT(index <= to_index);
-Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
- bool* safe_to_add_transition) {
- // Special case: indirect SMI->FAST transition (cf. comment in
- // AddElementsTransition()).
- if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- elements_kind == FAST_ELEMENTS) {
- Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
- safe_to_add_transition);
- if (double_map == NULL) return double_map;
- return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
- safe_to_add_transition);
- }
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), safe_to_add_transition);
- if (descriptor_contents != NULL) {
- Map* maybe_transition_map =
- GetElementsTransitionMapFromDescriptor(descriptor_contents,
- elements_kind);
- ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
- return maybe_transition_map;
+ Map* current_map = map;
+
+ for (; index < to_index; ++index) {
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
+ MaybeObject* maybe_next_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
- return NULL;
-}
+  // In case we are exiting the fast elements kind system, just add the map at
+  // the end.
+ if (!IsFastElementsKind(to_kind)) {
+ MaybeObject* maybe_next_map =
+ current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
+ if (!maybe_next_map->To(&current_map)) return maybe_next_map;
+ }
-MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
- Map* transitioned_map) {
- // The map transition graph should be a tree, therefore the transition
- // from SMI to FAST elements is not done directly, but by going through
- // DOUBLE elements first.
- if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- elements_kind == FAST_ELEMENTS) {
- bool safe_to_add = true;
- Map* double_map = this->LookupElementsTransitionMap(
- FAST_DOUBLE_ELEMENTS, &safe_to_add);
- // This method is only called when safe_to_add_transition has been found
- // to be true earlier.
- ASSERT(safe_to_add);
-
- if (double_map == NULL) {
- MaybeObject* maybe_map = this->CopyDropTransitions();
- if (!maybe_map->To(&double_map)) return maybe_map;
- double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
- MaybeObject* maybe_double_transition = this->AddElementsTransition(
- FAST_DOUBLE_ELEMENTS, double_map);
- if (maybe_double_transition->IsFailure()) return maybe_double_transition;
- }
- return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
- }
-
- bool safe_to_add_transition = true;
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), &safe_to_add_transition);
- // This method is only called when safe_to_add_transition has been found
- // to be true earlier.
- ASSERT(safe_to_add_transition);
- MaybeObject* maybe_new_contents =
- AddElementsTransitionMapToDescriptor(descriptor_contents,
- transitioned_map);
- Object* new_contents;
- if (!maybe_new_contents->ToObject(&new_contents)) {
- return maybe_new_contents;
- }
-
- ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
- new_contents);
- Object* new_descriptors;
- MaybeObject* maybe_new_descriptors =
- instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- transitioned_map->SetBackPointer(this);
- return this;
+ ASSERT(current_map->elements_kind() == to_kind);
+ return current_map;
}
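// AddMissingElementsTransitions materializes every intermediate map between
// the current fast kind and the target, so the transition graph remains a
// single chain that FindClosestElementsTransition can walk one hop at a
// time. A sketch of the index walk (sequence indices assumed, and
// CopyAsElementsKind modeled as appending to a chain):

#include <cstdio>
#include <vector>

int main() {
  int index = 0;                      // e.g. the SMI-only kind
  const int to_index = 3;             // most general fast kind in the chain
  std::vector<int> chain = {index};   // maps created so far, by kind index
  for (; index < to_index; ++index) {
    // CopyAsElementsKind(next_kind, INSERT_TRANSITION) in the diff above.
    chain.push_back(index + 1);
  }
  for (int k : chain) std::printf("map for kind %d\n", k);
  return 0;  // the final map's index equals to_index, as the ASSERT checks
}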
@@ -2429,56 +2382,39 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
- Map* current_map = map();
- ElementsKind from_kind = current_map->elements_kind();
+ Map* start_map = map();
+ ElementsKind from_kind = start_map->elements_kind();
- if (from_kind == to_kind) return current_map;
+ if (from_kind == to_kind) {
+ return start_map;
+ }
- // Only objects with FastProperties can have DescriptorArrays and can track
- // element-related maps. Also don't add descriptors to maps that are shared.
- bool safe_to_add_transition = HasFastProperties() &&
- !current_map->IsUndefined() &&
- !current_map->is_shared();
+ bool allow_store_transition =
+ // Only remember the map transition if the object's map is NOT equal to
+      // the global object_function's map and there is no pre-existing,
+      // non-matching elements transition.
+ (GetIsolate()->empty_object_map() != map()) &&
+ !start_map->IsUndefined() && !start_map->is_shared() &&
+ IsFastElementsKind(from_kind);
- // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
- // with elements that switch back and forth between dictionary and fast
- // element mode.
- if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
- safe_to_add_transition = false;
+ // Only store fast element maps in ascending generality.
+ if (IsFastElementsKind(to_kind)) {
+ allow_store_transition &=
+ IsTransitionableFastElementsKind(from_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind);
}
- if (safe_to_add_transition) {
- // It's only safe to manipulate the descriptor array if it would be
- // safe to add a transition.
- Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
- to_kind, &safe_to_add_transition);
- if (maybe_transition_map != NULL) {
- return maybe_transition_map;
- }
+ if (!allow_store_transition) {
+ return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
}
- Map* new_map = NULL;
+ Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
- // No transition to an existing map for the given ElementsKind. Make a new
- // one.
- { MaybeObject* maybe_map = current_map->CopyDropTransitions();
- if (!maybe_map->To(&new_map)) return maybe_map;
+ if (closest_map->elements_kind() == to_kind) {
+ return closest_map;
}
- new_map->set_elements_kind(to_kind);
-
- // Only remember the map transition if the object's map is NOT equal to the
- // global object_function's map and there is not an already existing
- // non-matching element transition.
- Context* global_context = GetIsolate()->context()->global_context();
- bool allow_map_transition = safe_to_add_transition &&
- (global_context->object_function()->map() != map());
- if (allow_map_transition) {
- MaybeObject* maybe_transition =
- current_map->AddElementsTransition(to_kind, new_map);
- if (maybe_transition->IsFailure()) return maybe_transition;
- }
- return new_map;
+ return AddMissingElementsTransitions(closest_map, to_kind);
}
@@ -2493,47 +2429,47 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
}
if (HasFastProperties()) {
- LookupInDescriptor(name, result);
- if (result->IsFound()) {
- // A property, a map transition or a null descriptor was found.
- // We return all of these result types because
- // LocalLookupRealNamedProperty is used when setting properties
- // where map transitions and null descriptors are handled.
- ASSERT(result->holder() == this && result->type() != NORMAL);
- // Disallow caching for uninitialized constants. These can only
- // occur as fields.
- if (result->IsReadOnly() && result->type() == FIELD &&
- FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
- result->DisallowCaching();
- }
- return;
+ map()->LookupDescriptor(this, name, result);
+ // A property or a map transition was found. We return all of these result
+ // types because LocalLookupRealNamedProperty is used when setting
+ // properties where map transitions are handled.
+ ASSERT(!result->IsFound() ||
+ (result->holder() == this && result->IsFastPropertyType()));
+ // Disallow caching for uninitialized constants. These can only
+ // occur as fields.
+ if (result->IsField() &&
+ result->IsReadOnly() &&
+ FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
+ result->DisallowCaching();
}
- } else {
- int entry = property_dictionary()->FindEntry(name);
- if (entry != StringDictionary::kNotFound) {
- Object* value = property_dictionary()->ValueAt(entry);
- if (IsGlobalObject()) {
- PropertyDetails d = property_dictionary()->DetailsAt(entry);
- if (d.IsDeleted()) {
- result->NotFound();
- return;
- }
- value = JSGlobalPropertyCell::cast(value)->value();
+ return;
+ }
+
+ int entry = property_dictionary()->FindEntry(name);
+ if (entry != StringDictionary::kNotFound) {
+ Object* value = property_dictionary()->ValueAt(entry);
+ if (IsGlobalObject()) {
+ PropertyDetails d = property_dictionary()->DetailsAt(entry);
+ if (d.IsDeleted()) {
+ result->NotFound();
+ return;
}
- // Make sure to disallow caching for uninitialized constants
- // found in the dictionary-mode objects.
- if (value->IsTheHole()) result->DisallowCaching();
- result->DictionaryResult(this, entry);
- return;
+ value = JSGlobalPropertyCell::cast(value)->value();
}
+ // Make sure to disallow caching for uninitialized constants
+ // found in the dictionary-mode objects.
+ if (value->IsTheHole()) result->DisallowCaching();
+ result->DictionaryResult(this, entry);
+ return;
}
+
result->NotFound();
}
void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty()) return;
+ if (result->IsFound()) return;
LookupRealNamedPropertyInPrototypes(name, result);
}
@@ -2544,9 +2480,13 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = JSObject::cast(pt)->GetPrototype()) {
+ pt = pt->GetPrototype()) {
+ if (pt->IsJSProxy()) {
+ return result->HandlerResult(JSProxy::cast(pt));
+ }
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
+ ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR));
+ if (result->IsFound()) return;
}
result->NotFound();
}
@@ -2560,7 +2500,7 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupCallbackSetterInPrototypes(name, result);
+ LookupRealNamedPropertyInPrototypes(name, result);
}
if (result->IsProperty()) {
@@ -2613,13 +2553,14 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result,
String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- if (result->IsFound() && result->type() == HANDLER) {
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode) {
+ if (result->IsHandler()) {
return result->proxy()->SetPropertyWithHandler(
- key, value, attributes, strict_mode);
+ this, key, value, attributes, strict_mode);
} else {
return JSObject::cast(this)->SetPropertyForResult(
- result, key, value, attributes, strict_mode);
+ result, key, value, attributes, strict_mode, store_mode);
}
}
@@ -2633,20 +2574,21 @@ bool JSProxy::HasPropertyWithHandler(String* name_raw) {
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return false;
return result->ToBoolean()->IsTrue();
}
MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
+ JSReceiver* receiver_raw,
String* name_raw,
Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(this);
+ Handle<JSReceiver> receiver(receiver_raw);
Handle<Object> name(name_raw);
Handle<Object> value(value_raw);
@@ -2658,77 +2600,92 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
+ JSReceiver* receiver_raw,
String* name_raw,
Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- bool* found) {
- *found = true; // except where defined otherwise...
- Isolate* isolate = GetHeap()->isolate();
+ bool* done) {
+ Isolate* isolate = GetIsolate();
Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name(name_raw);
Handle<Object> value(value_raw);
+ Handle<Object> handler(this->handler()); // Trap might morph proxy.
+
+ *done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Failure::Exception();
- if (!result->IsUndefined()) {
- // The proxy handler cares about this property.
- // Check whether it is virtualized as an accessor.
- // Emulate [[GetProperty]] semantics for proxies.
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- Handle<String> conf_name =
- isolate->factory()->LookupAsciiSymbol("configurable_");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
- ASSERT(!isolate->has_pending_exception());
- if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
- Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- ASSERT(configurable->IsTrue());
+ if (result->IsUndefined()) {
+ *done = false;
+ return GetHeap()->the_hole_value();
+ }
+
+ // Emulate [[GetProperty]] semantics for proxies.
+ bool has_pending_exception;
+ Handle<Object> argv[] = { result };
+ Handle<Object> desc =
+ Execution::Call(isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
- // Check for AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
- Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+  // [[GetProperty]] requires checking that all properties are configurable.
+ Handle<String> configurable_name =
+ isolate->factory()->LookupAsciiSymbol("configurable_");
+ Handle<Object> configurable(
+ v8::internal::GetProperty(desc, configurable_name));
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(configurable->IsTrue() || configurable->IsFalse());
+ if (configurable->IsFalse()) {
+ Handle<String> trap =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> args[] = { handler, trap, name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
+ }
+ ASSERT(configurable->IsTrue());
+
+ // Check for DataDescriptor.
+ Handle<String> hasWritable_name =
+ isolate->factory()->LookupAsciiSymbol("hasWritable_");
+ Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name));
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
+ if (hasWritable->IsTrue()) {
+ Handle<String> writable_name =
+ isolate->factory()->LookupAsciiSymbol("writable_");
+ Handle<Object> writable(v8::internal::GetProperty(desc, writable_name));
ASSERT(!isolate->has_pending_exception());
- if (!setter->IsUndefined()) {
- // We have a setter -- invoke it.
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return proxy->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
- } else {
- Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
- Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
- ASSERT(!isolate->has_pending_exception());
- if (!getter->IsUndefined()) {
- // We have a getter but no setter -- the property may not be
- // written. In strict mode, throw an error.
- if (strict_mode == kNonStrictMode) return *value;
- Handle<Object> args[] = { name, proxy };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- }
- // Fall-through.
+ ASSERT(writable->IsTrue() || writable->IsFalse());
+ *done = writable->IsFalse();
+ if (!*done) return GetHeap()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return *value;
+ Handle<Object> args[] = { name, receiver };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ return isolate->Throw(*error);
}
- // The proxy does not define the property as an accessor.
- *found = false;
- return *value;
+ // We have an AccessorDescriptor.
+ Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+ Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+ ASSERT(!isolate->has_pending_exception());
+ if (!setter->IsUndefined()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ return receiver->SetPropertyWithDefinedSetter(
+ JSReceiver::cast(*setter), *value);
+ }
+
+ if (strict_mode == kNonStrictMode) return *value;
+ Handle<Object> args2[] = { name, proxy };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
+ return isolate->Throw(*error);
}
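// The proxy path above linearizes the old nested trap handling: an undefined
// descriptor means "not handled here"; a descriptor with [[Writable]] is a
// data descriptor (read-only stops the store, throwing in strict mode);
// otherwise it is an accessor descriptor whose setter, if present, performs
// the store. The decision tree in isolation, with a hypothetical plain
// struct standing in for the trap-produced property descriptor:

#include <cstdio>

struct Desc {
  bool defined;
  bool has_writable;  // present => data descriptor
  bool writable;
  bool has_setter;    // accessor descriptor with a set function
};

// *done mirrors the out-parameter above: "the prototype chain handled it".
static const char* Store(const Desc& d, bool strict, bool* done) {
  *done = true;
  if (!d.defined) { *done = false; return "undefined: keep looking"; }
  if (d.has_writable) {
    if (d.writable) { *done = false; return "writable data prop: not handled"; }
    return strict ? "throw strict_read_only_property" : "silently ignored";
  }
  if (d.has_setter) return "invoke the setter";
  return strict ? "throw no_setter_in_callback" : "silently ignored";
}

int main() {
  bool done;
  std::printf("%s\n", Store(Desc{true, true, false, false}, true, &done));
  std::printf("%s\n", Store(Desc{true, false, false, true}, false, &done));
  return 0;
}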
@@ -2736,7 +2693,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
String* name_raw, DeleteMode mode) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(this);
+ Handle<JSProxy> receiver(this);
Handle<Object> name(name_raw);
Handle<Object> args[] = { name };
@@ -2746,8 +2703,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
Object* bool_result = result->ToBoolean();
if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+ Handle<Object> handler(receiver->handler());
Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
- Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
+ Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
@@ -2821,12 +2779,14 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver,
+ JSReceiver* receiver_raw,
uint32_t index) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
+ Handle<JSProxy> proxy(this);
+ Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return GetPropertyAttributeWithHandler(receiver, *name);
+ return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
}
@@ -2850,7 +2810,7 @@ void JSProxy::Fix() {
Object* hash;
if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, hash);
+ isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
}
}
@@ -2882,11 +2842,20 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
- String* name,
- Object* value,
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->AddFastPropertyUsingMap(*map));
+}
+
+
+MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
+ String* name_raw,
+ Object* value_raw,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -2895,116 +2864,156 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We make these short keys into symbols to avoid constantly
// reallocating them.
- if (!name->IsSymbol() && name->length() <= 2) {
+ if (!name_raw->IsSymbol() && name_raw->length() <= 2) {
Object* symbol_version;
- { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name);
+ { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name_raw);
if (maybe_symbol_version->ToObject(&symbol_version)) {
- name = String::cast(symbol_version);
+ name_raw = String::cast(symbol_version);
}
}
}
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(
- result, name, value, true, strict_mode);
+ lookup, name_raw, value_raw, true, strict_mode);
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetPropertyForResult(
- result, name, value, attributes, strict_mode);
+ lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
}
- if (!result->IsProperty() && !IsJSContextExtensionObject()) {
- bool found = false;
- MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
- }
+ // From this point on everything needs to be handlified, because
+ // SetPropertyViaPrototypes might call back into JavaScript.
+ HandleScope scope(GetIsolate());
+ Handle<JSObject> self(this);
+ Handle<String> name(name_raw);
+ Handle<Object> value(value_raw);
- // At this point, no GC should have happened, as this would invalidate
- // 'result', which we cannot handlify!
+ if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
+ bool done = false;
+ MaybeObject* result_object = self->SetPropertyViaPrototypes(
+ *name, *value, attributes, strict_mode, &done);
+ if (done) return result_object;
+ }
- if (!result->IsFound()) {
+ if (!lookup->IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes, strict_mode);
+ return self->AddProperty(
+ *name, *value, attributes, strict_mode, store_mode);
}
- if (result->IsReadOnly() && result->IsProperty()) {
+
+ if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> args[] = { hname, self };
+ Handle<Object> args[] = { name, self };
return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
- return value;
+ return *value;
}
}
+
+ Handle<Object> old_value(heap->the_hole_value());
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ old_value = handle(lookup->GetLazyValue());
+ }
+
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- switch (result->type()) {
+ MaybeObject* result = *value;
+ switch (lookup->type()) {
case NORMAL:
- return SetNormalizedProperty(result, value);
+ result = self->SetNormalizedProperty(lookup, *value);
+ break;
case FIELD:
- return FastPropertyAtPut(result->GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result->GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result->GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
+ result = self->FastPropertyAtPut(lookup->GetFieldIndex(), *value);
+ break;
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (value == result->GetConstantFunction()) return value;
+ if (*value == lookup->GetConstantFunction()) return *value;
// Preserve the attributes of this existing property.
- attributes = result->GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
- case CALLBACKS:
- return SetPropertyWithCallback(result->GetCallbackObject(),
- name,
- value,
- result->holder(),
- strict_mode);
+ attributes = lookup->GetAttributes();
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ break;
+ case CALLBACKS: {
+ Object* callback_object = lookup->GetCallbackObject();
+ return self->SetPropertyWithCallback(callback_object,
+ *name,
+ *value,
+ lookup->holder(),
+ strict_mode);
+ }
case INTERCEPTOR:
- return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
- case CONSTANT_TRANSITION: {
- // If the same constant function is being added we can simply
- // transition to the target map.
- Map* target_map = result->GetTransitionMap();
- DescriptorArray* target_descriptors = target_map->instance_descriptors();
- int number = target_descriptors->SearchWithCache(name);
- ASSERT(number != DescriptorArray::kNotFound);
- ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
- JSFunction* function =
- JSFunction::cast(target_descriptors->GetValue(number));
- if (value == function) {
- set_map(target_map);
- return value;
+ result = self->SetPropertyWithInterceptor(*name,
+ *value,
+ attributes,
+ strict_mode);
+ break;
+ case TRANSITION: {
+ Map* transition_map = lookup->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD) {
+ if (attributes == details.attributes()) {
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ result = self->AddFastPropertyUsingMap(transition_map,
+ *name,
+ *value,
+ field_index);
+ } else {
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ }
+ } else if (details.type() == CALLBACKS) {
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ } else {
+ ASSERT(details.type() == CONSTANT_FUNCTION);
+
+ Object* constant_function = descriptors->GetValue(descriptor);
+ if (constant_function == *value) {
+ // If the same constant function is being added we can simply
+ // transition to the target map.
+ self->set_map(transition_map);
+ result = constant_function;
+ } else {
+ // Otherwise, replace with a map transition to a new map with a FIELD,
+ // even if the value is a constant function.
+ result = self->ConvertTransitionToMapTransition(
+ lookup->GetTransitionIndex(), *name, *value, attributes);
+ }
}
- // Otherwise, replace with a MAP_TRANSITION to a new map with a
- // FIELD, even if the value is a constant function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+ break;
}
- case NULL_DESCRIPTOR:
- case ELEMENTS_TRANSITION:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
- return value;
}
- UNREACHABLE(); // keep the compiler happy
- return value;
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ if (lookup->IsTransition()) {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ } else {
+ LookupResult new_lookup(self->GetIsolate());
+ self->LocalLookup(*name, &new_lookup);
+ ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
+ if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+ EnqueueChangeRecord(self, "updated", name, old_value);
+ }
+ }
+ }
+
+ return *hresult;
}
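// With --harmony-observation enabled, the store path above snapshots the old
// value, performs the write, and only enqueues an "updated" change record if
// SameValue says the value actually changed (a transition always produces a
// "new" record). SameValue differs from == exactly where observers care:
// NaN matches itself and +0 is distinguishable from -0. A sketch for doubles:

#include <cmath>
#include <cstdio>

static bool SameValueDouble(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;  // NaN equals NaN
  if (a == 0.0 && b == 0.0) {                       // separate +0 and -0
    return std::signbit(a) == std::signbit(b);
  }
  return a == b;
}

int main() {
  std::printf("%d\n", SameValueDouble(NAN, NAN));   // 1: no change record
  std::printf("%d\n", SameValueDouble(0.0, -0.0));  // 0: "updated" enqueued
  return 0;
}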
@@ -3030,21 +3039,22 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- String* name,
- Object* value,
+ String* name_raw,
+ Object* value_raw,
PropertyAttributes attributes) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
Isolate* isolate = GetIsolate();
- LookupResult result(isolate);
- LocalLookup(name, &result);
+ LookupResult lookup(isolate);
+ LocalLookup(name_raw, &lookup);
+ if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&result,
- name,
- value,
+ if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(&lookup,
+ name_raw,
+ value_raw,
false,
kNonStrictMode);
}
@@ -3052,58 +3062,109 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name,
- value,
+ name_raw,
+ value_raw,
attributes);
}
// Check for accessor in prototype chain removed here in clone.
- if (!result.IsFound()) {
+ if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name, value, attributes, kNonStrictMode);
+ return AddProperty(name_raw, value_raw, attributes, kNonStrictMode);
}
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ // From this point on everything needs to be handlified.
+ HandleScope scope(GetIsolate());
+ Handle<JSObject> self(this);
+ Handle<String> name(name_raw);
+ Handle<Object> value(value_raw);
+
+ Handle<Object> old_value(isolate->heap()->the_hole_value());
+ PropertyAttributes old_attributes = ABSENT;
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ old_value = handle(lookup.GetLazyValue());
+ old_attributes = lookup.GetAttributes();
+ }
// Check of IsReadOnly removed from here in clone.
- switch (result.type()) {
- case NORMAL:
- return SetNormalizedProperty(name, value, details);
+ MaybeObject* result = *value;
+ switch (lookup.type()) {
+ case NORMAL: {
+ PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ result = self->SetNormalizedProperty(*name, *value, details);
+ break;
+ }
case FIELD:
- return FastPropertyAtPut(result.GetFieldIndex(), value);
- case MAP_TRANSITION:
- if (attributes == result.GetAttributes()) {
- // Only use map transition if the attributes match.
- return AddFastPropertyUsingMap(result.GetTransitionMap(),
- name,
- value);
- }
- return ConvertDescriptorToField(name, value, attributes);
+ result = self->FastPropertyAtPut(lookup.GetFieldIndex(), *value);
+ break;
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (value == result.GetConstantFunction()) return value;
- // Preserve the attributes of this existing property.
- attributes = result.GetAttributes();
- return ConvertDescriptorToField(name, value, attributes);
+ if (*value != lookup.GetConstantFunction()) {
+ // Preserve the attributes of this existing property.
+ attributes = lookup.GetAttributes();
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ }
+ break;
case CALLBACKS:
case INTERCEPTOR:
// Override callback in clone
- return ConvertDescriptorToField(name, value, attributes);
- case CONSTANT_TRANSITION:
- // Replace with a MAP_TRANSITION to a new map with a FIELD, even
- // if the value is a function.
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- case NULL_DESCRIPTOR:
- case ELEMENTS_TRANSITION:
- return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ break;
+ case TRANSITION: {
+ Map* transition_map = lookup.GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == FIELD) {
+ if (attributes == details.attributes()) {
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ result = self->AddFastPropertyUsingMap(
+ transition_map, *name, *value, field_index);
+ } else {
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ }
+ } else if (details.type() == CALLBACKS) {
+ result = self->ConvertDescriptorToField(*name, *value, attributes);
+ } else {
+ ASSERT(details.type() == CONSTANT_FUNCTION);
+
+ // Replace transition to CONSTANT FUNCTION with a map transition to a
+ // new map with a FIELD, even if the value is a function.
+ result = self->ConvertTransitionToMapTransition(
+ lookup.GetTransitionIndex(), *name, *value, attributes);
+ }
+ break;
+ }
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
}
- UNREACHABLE(); // keep the compiler happy
- return value;
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ if (lookup.IsTransition()) {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ } else {
+ LookupResult new_lookup(isolate);
+ self->LocalLookup(*name, &new_lookup);
+ ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
+ if (old_value->IsTheHole() ||
+ new_lookup.GetAttributes() != old_attributes) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+ EnqueueChangeRecord(self, "updated", name, old_value);
+ }
+ }
+ }
+
+ return *hresult;
}
@@ -3114,7 +3175,7 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) return result.GetAttributes();
+ if (result.IsFound()) return result.GetAttributes();
if (continue_search) {
// Continue searching via the prototype chain.
@@ -3184,44 +3245,46 @@ PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
String* key) {
uint32_t index = 0;
if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
- ? NONE : ABSENT;
+ return JSObject::cast(this)->GetElementAttributeWithReceiver(
+ receiver, index, true);
}
// Named property.
- LookupResult result(GetIsolate());
- Lookup(key, &result);
- return GetPropertyAttribute(receiver, &result, key, true);
+ LookupResult lookup(GetIsolate());
+ Lookup(key, &lookup);
+ return GetPropertyAttributeForResult(receiver, &lookup, key, true);
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
+ JSReceiver* receiver,
+ LookupResult* lookup,
+ String* name,
+ bool continue_search) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
JSObject* this_obj = JSObject::cast(this);
Heap* heap = GetHeap();
if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, result, name, continue_search);
+ receiver, lookup, name, continue_search);
}
}
- if (result->IsProperty()) {
- switch (result->type()) {
+ if (lookup->IsFound()) {
+ switch (lookup->type()) {
case NORMAL: // fall through
case FIELD:
case CONSTANT_FUNCTION:
case CALLBACKS:
- return result->GetAttributes();
+ return lookup->GetAttributes();
case HANDLER: {
- return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+ return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
receiver, name);
}
case INTERCEPTOR:
- return result->holder()->GetPropertyAttributeWithInterceptor(
+ return lookup->holder()->GetPropertyAttributeWithInterceptor(
JSObject::cast(receiver), name, continue_search);
- default:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
}
}
@@ -3233,13 +3296,113 @@ PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
// Check whether the name is an array index.
uint32_t index = 0;
if (IsJSObject() && name->AsArrayIndex(&index)) {
- if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
- return ABSENT;
+ return GetLocalElementAttribute(index);
}
// Named property.
- LookupResult result(GetIsolate());
- LocalLookup(name, &result);
- return GetPropertyAttribute(this, &result, name, false);
+ LookupResult lookup(GetIsolate());
+ LocalLookup(name, &lookup);
+ return GetPropertyAttributeForResult(this, &lookup, name, false);
+}
+
+
+PropertyAttributes JSObject::GetElementAttributeWithReceiver(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ Isolate* isolate = GetIsolate();
+
+ // Check access rights if needed.
+ if (IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return ABSENT;
+ }
+ }
+
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return ABSENT;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->GetElementAttributeWithReceiver(
+ receiver, index, continue_search);
+ }
+
+ // Check for lookup interceptor except when bootstrapping.
+ if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
+ return GetElementAttributeWithInterceptor(receiver, index, continue_search);
+ }
+
+ return GetElementAttributeWithoutInterceptor(
+ receiver, index, continue_search);
+}
+
+
+PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ Isolate* isolate = GetIsolate();
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc;
+ HandleScope scope(isolate);
+ Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+ Handle<JSReceiver> hreceiver(receiver);
+ Handle<JSObject> holder(this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
+ if (!interceptor->query()->IsUndefined()) {
+ v8::IndexedPropertyQuery query =
+ v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ v8::Handle<v8::Integer> result;
+ {
+ // Leaving JavaScript.
+ VMState state(isolate, EXTERNAL);
+ result = query(index, info);
+ }
+ if (!result.IsEmpty())
+ return static_cast<PropertyAttributes>(result->Int32Value());
+ } else if (!interceptor->getter()->IsUndefined()) {
+ v8::IndexedPropertyGetter getter =
+ v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
+ v8::Handle<v8::Value> result;
+ {
+ // Leaving JavaScript.
+ VMState state(isolate, EXTERNAL);
+ result = getter(index, info);
+ }
+ if (!result.IsEmpty()) return DONT_ENUM;
+ }
+
+ return holder->GetElementAttributeWithoutInterceptor(
+ *hreceiver, index, continue_search);
+}
+
+
+PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
+ JSReceiver* receiver, uint32_t index, bool continue_search) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSReceiver> hreceiver(receiver);
+ Handle<JSObject> holder(this);
+ PropertyAttributes attr = holder->GetElementsAccessor()->GetAttributes(
+ *hreceiver, *holder, index);
+ if (attr != ABSENT) return attr;
+
+ if (holder->IsStringObjectWithCharacterAt(index)) {
+ return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ }
+
+ if (!continue_search) return ABSENT;
+
+ Object* pt = holder->GetPrototype();
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(*hreceiver, index);
+ }
+ if (pt->IsNull()) return ABSENT;
+ return JSObject::cast(pt)->GetElementAttributeWithReceiver(
+ *hreceiver, index, true);
}
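

The element-attribute walk above bottoms out in PropertyAttributes values combined as bit flags (READ_ONLY | DONT_DELETE for a string's characters, ABSENT for a miss). A minimal standalone model of that flag arithmetic, with illustrative values rather than V8's real header:

    #include <cstdio>

    // Hypothetical standalone model of V8's PropertyAttributes bit flags.
    enum PropertyAttributes {
      NONE        = 0,
      READ_ONLY   = 1 << 0,
      DONT_ENUM   = 1 << 1,
      DONT_DELETE = 1 << 2,
      ABSENT      = 1 << 3  // Sentinel for "no such property".
    };

    int main() {
      // A character of a String object behaves like a frozen element:
      PropertyAttributes attr =
          static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
      std::printf("read-only: %d, deletable: %d\n",
                  (attr & READ_ONLY) != 0, (attr & DONT_DELETE) == 0);
      return 0;
    }
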
@@ -3251,20 +3414,27 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
Object* result = get(index);
if (result->IsMap() &&
Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Map::cast(result)->SharedMapVerify();
}
+#endif
+#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
- // The cached map should match newly created normalized map bit-by-bit.
+      // The cached map should match a newly created normalized map
+      // bit-by-bit, except for the code cache, which can contain some
+      // ICs that can be applied to the shared map.
Object* fresh;
- { MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kSize) == 0);
- }
+ MaybeObject* maybe_fresh =
+ fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (maybe_fresh->ToObject(&fresh)) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ int offset = Map::kCodeCacheOffset + kPointerSize;
+ ASSERT(memcmp(Map::cast(fresh)->address() + offset,
+ Map::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
}
#endif
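

The slow assertion above compares two maps byte-for-byte while skipping the code cache slot, since that field may legitimately diverge between the cached and the freshly built map. The same skip-one-field comparison in a self-contained sketch; the struct and offsets are invented for illustration:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Illustrative stand-in for a Map body with one mutable cache slot;
    // all fields are pointer-sized, so the struct has no padding bytes.
    struct FakeMap {
      long instance_size;
      long code_cache;   // May legitimately differ between copies.
      long bit_field;
    };

    // Byte-compare two objects while skipping one member, the way the
    // assertion above compares maps around kCodeCacheOffset.
    bool EqualExceptCodeCache(const FakeMap& a, const FakeMap& b) {
      const size_t before = offsetof(FakeMap, code_cache);
      const size_t after = before + sizeof(a.code_cache);
      return std::memcmp(&a, &b, before) == 0 &&
             std::memcmp(reinterpret_cast<const char*>(&a) + after,
                         reinterpret_cast<const char*>(&b) + after,
                         sizeof(FakeMap) - after) == 0;
    }

    int main() {
      FakeMap x{32, 0, 7};
      FakeMap y = x;
      y.code_cache = 0xdead;  // Differs only in the skipped field.
      assert(EqualExceptCodeCache(x, y));
      return 0;
    }
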
@@ -3275,6 +3445,7 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj,
fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
+ ASSERT(Map::cast(result)->is_dictionary_map());
set(index, result);
isolate->counters()->normalized_maps()->Increment();
@@ -3338,24 +3509,25 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
Map* map_of_this = map();
// Allocate new content.
- int property_count = map_of_this->NumberOfDescribedProperties();
+ int real_size = map_of_this->NumberOfOwnDescriptors();
+ int property_count = real_size;
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
property_count += 2; // Make space for two more properties.
}
StringDictionary* dictionary;
- { MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
+ MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
DescriptorArray* descs = map_of_this->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT_FUNCTION: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ NORMAL,
+ details.descriptor_index());
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -3363,8 +3535,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
break;
}
case FIELD: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), NORMAL, details.index());
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ NORMAL,
+ details.descriptor_index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -3372,26 +3545,19 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
break;
}
case CALLBACKS: {
- if (!descs->IsProperty(i)) break;
Object* value = descs->GetCallbacksObject(i);
- if (value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
+ details = details.set_pointer(0);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, details);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
case INTERCEPTOR:
- case ELEMENTS_TRANSITION:
break;
case HANDLER:
case NORMAL:
+ case TRANSITION:
+ case NONEXISTENT:
UNREACHABLE();
break;
}
@@ -3400,15 +3566,14 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
Heap* current_heap = GetHeap();
// Copy the next enumeration index from instance descriptor.
- int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
- dictionary->SetNextEnumerationIndex(index);
+ dictionary->SetNextEnumerationIndex(real_size + 1);
Map* new_map;
- { MaybeObject* maybe_map =
- current_heap->isolate()->context()->global_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
- }
+ MaybeObject* maybe_map =
+ current_heap->isolate()->context()->native_context()->
+ normalized_map_cache()->Get(this, mode);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ ASSERT(new_map->is_dictionary_map());
// We have now successfully allocated all the necessary objects.
// Changes can now be made with the guarantee that all of them take effect.
@@ -3424,9 +3589,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
-instance_size_delta);
}
-
set_map(new_map);
- new_map->clear_instance_descriptors();
set_properties(dictionary);
@@ -3479,8 +3642,7 @@ MaybeObject* JSObject::NormalizeElements() {
}
if (array->IsDictionary()) return array;
- ASSERT(HasFastElements() ||
- HasFastSmiOnlyElements() ||
+ ASSERT(HasFastSmiOrObjectElements() ||
HasFastDoubleElements() ||
HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
@@ -3515,8 +3677,7 @@ MaybeObject* JSObject::NormalizeElements() {
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
} else {
- ASSERT(old_map->has_fast_elements() ||
- old_map->has_fast_smi_only_elements());
+ ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -3575,7 +3736,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
hash);
if (maybe->IsFailure()) return maybe;
@@ -3621,6 +3782,7 @@ MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* JSObject::GetHiddenProperty(String* key) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3630,22 +3792,31 @@ Object* JSObject::GetHiddenProperty(String* key) {
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->undefined_value();
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
+
+ if (inline_value->IsSmi()) {
+ // Handle inline-stored identity hash.
+ if (key == GetHeap()->identity_hash_symbol()) {
+ return inline_value;
+ } else {
+ return GetHeap()->undefined_value();
+ }
}
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
- return dictionary->ValueAt(entry);
+
+ if (inline_value->IsUndefined()) return GetHeap()->undefined_value();
+
+ ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
+ Object* entry = hashtable->Lookup(key);
+ if (entry->IsTheHole()) return GetHeap()->undefined_value();
+ return entry;
}
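

GetHiddenProperty now has to handle two encodings of the hidden slot: a bare Smi holding just the identity hash, or a full ObjectHashTable. A standalone toy model of that read path, using std::variant in place of tagged heap values; all names are hypothetical:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <variant>

    // Hypothetical model: the hidden-properties slot is either empty,
    // a bare identity hash, or a full key/value table.
    struct HiddenSlot {
      std::variant<std::monostate, int,
                   std::unordered_map<std::string, int>> value;

      // Mirrors the shape of JSObject::GetHiddenProperty: the identity
      // hash is readable even before any table has been allocated.
      bool Get(const std::string& key, int* out) const {
        if (const int* hash = std::get_if<int>(&value)) {
          if (key != "identity_hash") return false;
          *out = *hash;
          return true;
        }
        if (const auto* table =
                std::get_if<std::unordered_map<std::string, int>>(&value)) {
          auto it = table->find(key);
          if (it == table->end()) return false;
          *out = it->second;
          return true;
        }
        return false;  // monostate: nothing stored yet.
      }
    };

    int main() {
      HiddenSlot slot;
      slot.value = 42;  // Inline-stored identity hash.
      int v;
      std::printf("%d %d\n", slot.Get("identity_hash", &v) ? v : -1,
                  slot.Get("other", &v));  // Prints: 42 0
      return 0;
    }
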
Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value) {
+ Handle<String> key,
+ Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value),
Object);
@@ -3653,6 +3824,7 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3662,27 +3834,29 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
- StringDictionary* dictionary;
- if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
- // If it was found, check if the key is already in the dictionary.
- int entry = dictionary->FindEntry(key);
- if (entry != StringDictionary::kNotFound) {
- // If key was found, just update the value.
- dictionary->ValueAtPut(entry, value);
- return this;
+ // If there is no backing store yet, store the identity hash inline.
+ if (value->IsSmi() &&
+ key == GetHeap()->identity_hash_symbol() &&
+ (inline_value->IsUndefined() || inline_value->IsSmi())) {
+ return SetHiddenPropertiesHashTable(value);
}
- // Key was not already in the dictionary, so add the entry.
- MaybeObject* insert_result = dictionary->Add(key,
- value,
- PropertyDetails(NONE, NORMAL));
- StringDictionary* new_dict;
- if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
- if (new_dict != dictionary) {
+
+ hidden_lookup = GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT);
+ ObjectHashTable* hashtable;
+ if (!hidden_lookup->To(&hashtable)) return hidden_lookup;
+
+  // Store the value under the key; Put overwrites any existing entry.
+ MaybeObject* insert_result = hashtable->Put(key, value);
+ ObjectHashTable* new_table;
+ if (!insert_result->To(&new_table)) return insert_result;
+ if (new_table != hashtable) {
    // If adding the key expanded the hash table (i.e., Put returned a new
    // table), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+ MaybeObject* store_result = SetHiddenPropertiesHashTable(new_table);
if (store_result->IsFailure()) return store_result;
}
// Return this to mark success.
@@ -3691,6 +3865,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
void JSObject::DeleteHiddenProperty(String* key) {
+ ASSERT(key->IsSymbol());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3700,18 +3875,19 @@ void JSObject::DeleteHiddenProperty(String* key) {
JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
return;
}
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) {
- // Key wasn't in dictionary. Deletion is a success.
- return;
- }
- // Key was in the dictionary. Remove it.
- dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+ ASSERT(!IsJSGlobalProxy());
+ MaybeObject* hidden_lookup =
+ GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
+
+ // We never delete (inline-stored) identity hashes.
+ ASSERT(key != GetHeap()->identity_hash_symbol());
+ if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
+
+ ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
+ MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
+ USE(delete_result);
+ ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
}
@@ -3722,77 +3898,102 @@ bool JSObject::HasHiddenProperties() {
}
-MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+MaybeObject* JSObject::GetHiddenPropertiesHashTable(
+ InitializeHiddenProperties init_option) {
ASSERT(!IsJSGlobalProxy());
+ Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
    // hidden symbol's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
- if (descriptors->GetType(0) == FIELD) {
- Object* hidden_store =
- this->FastPropertyAt(descriptors->GetFieldIndex(0));
- return StringDictionary::cast(hidden_store);
+ if (descriptors->number_of_descriptors() > 0) {
+ int sorted_index = descriptors->GetSortedKeyIndex(0);
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ sorted_index < map()->NumberOfOwnDescriptors()) {
+ ASSERT(descriptors->GetType(sorted_index) == FIELD);
+ inline_value =
+ this->FastPropertyAt(descriptors->GetFieldIndex(sorted_index));
} else {
- ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
- descriptors->GetType(0) == MAP_TRANSITION);
+ inline_value = GetHeap()->undefined_value();
}
+ } else {
+ inline_value = GetHeap()->undefined_value();
}
} else {
PropertyAttributes attributes;
// You can't install a getter on a property indexed by the hidden symbol,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
- Object* lookup =
+ inline_value =
GetLocalPropertyPostInterceptor(this,
GetHeap()->hidden_symbol(),
&attributes)->ToObjectUnchecked();
- if (!lookup->IsUndefined()) {
- return StringDictionary::cast(lookup);
- }
}
- if (!create_if_absent) return GetHeap()->undefined_value();
- const int kInitialSize = 5;
- MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
- StringDictionary* dictionary;
- if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
- // Using AddProperty or SetPropertyPostInterceptor here could fail, because
- // object might be non-extensible.
- return HasFastProperties()
- ? AddFastProperty(GetHeap()->hidden_symbol(), dictionary, DONT_ENUM)
- : AddSlowProperty(GetHeap()->hidden_symbol(), dictionary, DONT_ENUM);
+
+ if (init_option == ONLY_RETURN_INLINE_VALUE ||
+ inline_value->IsHashTable()) {
+ return inline_value;
+ }
+
+ ObjectHashTable* hashtable;
+ static const int kInitialCapacity = 4;
+ MaybeObject* maybe_obj =
+ ObjectHashTable::Allocate(kInitialCapacity,
+ ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
+ if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
+
+ if (inline_value->IsSmi()) {
+    // We were storing the identity hash inline and have now allocated an
+    // actual hash table. Put the identity hash into the new table.
+ MaybeObject* insert_result =
+ hashtable->Put(GetHeap()->identity_hash_symbol(), inline_value);
+ ObjectHashTable* new_table;
+ if (!insert_result->To(&new_table)) return insert_result;
+ // We expect no resizing for the first insert.
+ ASSERT_EQ(hashtable, new_table);
+ }
+
+ MaybeObject* store_result =
+ SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ hashtable,
+ DONT_ENUM,
+ kNonStrictMode,
+ OMIT_EXTENSIBILITY_CHECK);
+ if (store_result->IsFailure()) return store_result;
+ return hashtable;
}
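

The CREATE_NEW_IF_ABSENT path above allocates the backing table lazily and migrates an inline-stored identity hash into it on first use. Continuing the toy model from earlier, the upgrade step looks roughly like this (again hypothetical, not V8 code):

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <variant>

    using Table = std::unordered_map<std::string, int>;
    using Slot = std::variant<std::monostate, int, Table>;

    // Mirrors GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT):
    // allocate a table on demand, carrying any inline hash over.
    Table& EnsureTable(Slot* slot) {
      if (Table* t = std::get_if<Table>(slot)) return *t;
      Table table;
      if (const int* hash = std::get_if<int>(slot)) {
        // The inline identity hash moves into the new table.
        table.emplace("identity_hash", *hash);
      }
      *slot = std::move(table);
      return std::get<Table>(*slot);
    }

    int main() {
      Slot slot = 7;                     // Inline hash only.
      EnsureTable(&slot)["secret"] = 1;  // Forces the upgrade.
      assert(std::get<Table>(slot).at("identity_hash") == 7);
      return 0;
    }
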
-MaybeObject* JSObject::SetHiddenPropertiesDictionary(
- StringDictionary* dictionary) {
+MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
ASSERT(!IsJSGlobalProxy());
- ASSERT(HasHiddenProperties());
+ // We can store the identity hash inline iff there is no backing store
+ // for hidden properties yet.
+ ASSERT(HasHiddenProperties() != value->IsSmi());
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
    // hidden symbol's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
- if (descriptors->GetType(0) == FIELD) {
- this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+ if (descriptors->number_of_descriptors() > 0) {
+ int sorted_index = descriptors->GetSortedKeyIndex(0);
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ sorted_index < map()->NumberOfOwnDescriptors()) {
+ ASSERT(descriptors->GetType(sorted_index) == FIELD);
+ this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
+ value);
return this;
- } else {
- ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
- descriptors->GetType(0) == MAP_TRANSITION);
}
}
}
MaybeObject* store_result =
SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
+ value,
DONT_ENUM,
- kNonStrictMode);
+ kNonStrictMode,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return this;
}
@@ -3803,7 +4004,7 @@ MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (!result.IsProperty()) return GetHeap()->true_value();
+ if (!result.IsFound()) return GetHeap()->true_value();
// Normalize object if needed.
Object* obj;
@@ -3837,7 +4038,9 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
}
MaybeObject* raw_result =
@@ -3872,7 +4075,9 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
*this_handle,
@@ -3922,15 +4127,39 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
}
- if (HasIndexedInterceptor()) {
- // Skip interceptor if forcing deletion.
- if (mode != FORCE_DELETION) {
- return DeleteElementWithInterceptor(index);
+ // From this point on everything needs to be handlified.
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this);
+
+ Handle<String> name;
+ Handle<Object> old_value(isolate->heap()->the_hole_value());
+ bool preexists = false;
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ name = isolate->factory()->Uint32ToString(index);
+ preexists = self->HasLocalElement(index);
+ if (preexists) {
+ // TODO(observe): only read & set old_value if it's not an accessor
+ old_value = Object::GetElement(self, index);
}
- mode = JSReceiver::FORCE_DELETION;
}
- return GetElementsAccessor()->Delete(this, index, mode);
+ MaybeObject* result;
+ // Skip interceptor if forcing deletion.
+ if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ result = self->DeleteElementWithInterceptor(index);
+ } else {
+ result = self->GetElementsAccessor()->Delete(*self, index, mode);
+ }
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ if (preexists && !self->HasLocalElement(index))
+ EnqueueChangeRecord(self, "deleted", name, old_value);
+ }
+
+ return *hresult;
}
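

The observed DeleteElement path follows a read-mutate-compare pattern: capture the old value, perform the deletion, then enqueue a change record only if the element actually disappeared. A self-contained sketch of that pattern; the types and the "deleted" record shape are illustrative, not the harmony-observation API:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct ChangeRecord { std::string type, name; int old_value; };

    // Mirrors the shape of the observed DeleteElement path above:
    // read-before, mutate, compare-after, then maybe enqueue.
    bool ObservedDelete(std::map<int, int>* elements, int index,
                        std::vector<ChangeRecord>* queue) {
      bool preexists = elements->count(index) != 0;
      int old_value = preexists ? (*elements)[index] : 0;
      bool deleted = elements->erase(index) != 0;  // The actual mutation.
      if (preexists && elements->count(index) == 0) {
        queue->push_back({"deleted", std::to_string(index), old_value});
      }
      return deleted;
    }

    int main() {
      std::map<int, int> elements{{0, 10}, {1, 20}};
      std::vector<ChangeRecord> queue;
      ObservedDelete(&elements, 1, &queue);
      ObservedDelete(&elements, 5, &queue);  // Absent: no record enqueued.
      std::printf("%zu record(s)\n", queue.size());  // 1 record(s)
      return 0;
    }
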
@@ -3964,38 +4193,60 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
return DeleteElement(index, mode);
- } else {
- LookupResult result(isolate);
- LocalLookup(name, &result);
- if (!result.IsProperty()) return isolate->heap()->true_value();
- // Ignore attributes if forcing a deletion.
- if (result.IsDontDelete() && mode != FORCE_DELETION) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- return isolate->heap()->false_value();
- }
- // Check for interceptor.
- if (result.type() == INTERCEPTOR) {
- // Skip interceptor if forcing a deletion.
- if (mode == FORCE_DELETION) {
- return DeletePropertyPostInterceptor(name, mode);
- }
- return DeletePropertyWithInterceptor(name);
+ }
+
+ LookupResult lookup(isolate);
+ LocalLookup(name, &lookup);
+ if (!lookup.IsFound()) return isolate->heap()->true_value();
+ // Ignore attributes if forcing a deletion.
+ if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
+ if (mode == STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ HandleScope scope(isolate);
+ Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, 2)));
}
+ return isolate->heap()->false_value();
+ }
+
+ // From this point on everything needs to be handlified.
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this);
+ Handle<String> hname(name);
+
+ Handle<Object> old_value(isolate->heap()->the_hole_value());
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ old_value = handle(lookup.GetLazyValue());
+ }
+ MaybeObject* result;
+
+ // Check for interceptor.
+ if (lookup.IsInterceptor()) {
+ // Skip interceptor if forcing a deletion.
+ if (mode == FORCE_DELETION) {
+ result = self->DeletePropertyPostInterceptor(*hname, mode);
+ } else {
+ result = self->DeletePropertyWithInterceptor(*hname);
+ }
+ } else {
// Normalize object if needed.
Object* obj;
- { MaybeObject* maybe_obj =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ result = self->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!result->To(&obj)) return result;
// Make sure the properties are normalized before removing the entry.
- return DeleteNormalizedProperty(name, mode);
+ result = self->DeleteNormalizedProperty(*hname, mode);
}
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ if (!self->HasLocalProperty(*hname))
+ EnqueueChangeRecord(self, "deleted", hname, old_value);
+ }
+
+ return *hresult;
}
@@ -4018,9 +4269,9 @@ MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- ASSERT(kind == FAST_ELEMENTS ||
+ ASSERT(IsFastObjectElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS);
- if (kind == FAST_ELEMENTS) {
+ if (IsFastObjectElementsKind(kind)) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: elements->length();
@@ -4072,12 +4323,15 @@ bool JSObject::ReferencesObject(Object* obj) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
// Raw pixels and external arrays do not reference other
// objects.
break;
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
break;
case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
@@ -4093,7 +4347,8 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS :
+ FAST_HOLEY_ELEMENTS;
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
@@ -4103,15 +4358,15 @@ bool JSObject::ReferencesObject(Object* obj) {
if (IsJSFunction()) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
- heap->isolate()->context()->global_context()->
+ heap->isolate()->context()->native_context()->
arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
- // Get the context and don't check if it is the global context.
+ // Get the context and don't check if it is the native context.
JSFunction* f = JSFunction::cast(this);
Context* context = f->context();
- if (context->IsGlobalContext()) {
+ if (context->IsNativeContext()) {
return false;
}
@@ -4187,9 +4442,9 @@ MaybeObject* JSObject::PreventExtensions() {
// Do a map transition, other objects with this map may still
// be extensible.
Map* new_map;
- { MaybeObject* maybe = map()->CopyDropTransitions();
- if (!maybe->To<Map>(&new_map)) return maybe;
- }
+ MaybeObject* maybe = map()->Copy();
+ if (!maybe->To(&new_map)) return maybe;
+
new_map->set_is_extensible(false);
set_map(new_map);
ASSERT(!map()->is_extensible());
@@ -4209,29 +4464,27 @@ bool JSReceiver::IsSimpleEnum() {
o = JSObject::cast(o)->GetPrototype()) {
if (!o->IsJSObject()) return false;
JSObject* curr = JSObject::cast(o);
- if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
+ int enum_length = curr->map()->EnumLength();
+ if (enum_length == Map::kInvalidEnumCache) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
- if (curr != this) {
- FixedArray* curr_fixed_array =
- FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
- if (curr_fixed_array->length() > 0) return false;
- }
+ if (curr != this && enum_length != 0) return false;
}
return true;
}
-int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
+int Map::NumberOfDescribedProperties(DescriptorFlag which,
+ PropertyAttributes filter) {
int result = 0;
DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (descs->IsProperty(i) && (details.attributes() & filter) == 0) {
- result++;
- }
+ int limit = which == ALL_DESCRIPTORS
+ ? descs->number_of_descriptors()
+ : NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if ((descs->GetDetails(i).attributes() & filter) == 0) result++;
}
return result;
}
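

NumberOfDescribedProperties counts only descriptors whose attribute bits fall entirely outside the filter mask. The filter idiom in isolation, with made-up attribute values:

    #include <cstdio>
    #include <vector>

    enum Attr { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

    // Count entries whose attribute bits are all outside the filter,
    // mirroring (details.attributes() & filter) == 0 above.
    int CountWithFilter(const std::vector<int>& attrs, int filter) {
      int result = 0;
      for (int a : attrs) {
        if ((a & filter) == 0) result++;
      }
      return result;
    }

    int main() {
      std::vector<int> attrs{NONE, DONT_ENUM, READ_ONLY,
                             DONT_ENUM | READ_ONLY};
      // Enumerable properties: filter out DONT_ENUM.
      std::printf("%d\n", CountWithFilter(attrs, DONT_ENUM));  // 2
      return 0;
    }
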
@@ -4239,10 +4492,9 @@ int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
int Map::PropertyIndexFor(String* name) {
DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) {
- return descs->GetFieldIndex(i);
- }
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
}
return -1;
}
@@ -4250,8 +4502,9 @@ int Map::PropertyIndexFor(String* name) {
int Map::NextFreePropertyIndex() {
int max_index = -1;
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
int current_index = descs->GetFieldIndex(i);
if (current_index > max_index) max_index = current_index;
@@ -4263,8 +4516,9 @@ int Map::NextFreePropertyIndex() {
AccessorDescriptor* Map::FindAccessor(String* name) {
DescriptorArray* descs = instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (name->Equals(descs->GetKey(i)) && descs->GetType(i) == CALLBACKS) {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ for (int i = 0; i < number_of_own_descriptors; i++) {
+ if (descs->GetType(i) == CALLBACKS && name->Equals(descs->GetKey(i))) {
return descs->GetCallbacks(i);
}
}
@@ -4322,7 +4576,8 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
}
-void JSReceiver::Lookup(String* name, LookupResult* result,
+void JSReceiver::Lookup(String* name,
+ LookupResult* result,
bool skip_fallback_interceptor) {
// Ecma-262 3rd 8.6.2.4
Heap* heap = GetHeap();
@@ -4332,20 +4587,20 @@ void JSReceiver::Lookup(String* name, LookupResult* result,
JSReceiver::cast(current)->LocalLookup(name,
result,
skip_fallback_interceptor);
- if (result->IsProperty()) return;
+ if (result->IsFound()) return;
}
result->NotFound();
}
-// Search object and it's prototype chain for callback properties.
-void JSObject::LookupCallback(String* name, LookupResult* result) {
+// Search object and its prototype chain for callback properties.
+void JSObject::LookupCallbackProperty(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
current != heap->null_value() && current->IsJSObject();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsFound() && result->type() == CALLBACKS) return;
+ if (result->IsPropertyCallbacks()) return;
}
result->NotFound();
}
@@ -4382,9 +4637,12 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
Object* setter,
PropertyAttributes attributes) {
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -4445,11 +4703,16 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
LookupResult result(GetHeap()->isolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty() && result.type() == CALLBACKS) {
- ASSERT(!result.IsDontDelete());
+ if (result.IsPropertyCallbacks()) {
+    // Note that the result can actually have IsDontDelete() == true when,
+    // for example, we have to fall back to the slow case while adding a
+    // setter after successfully reusing a map transition for a getter.
+    // Nevertheless, this is OK, because the assertion only holds for the
+    // whole addition of both accessors, not for the addition of each part.
+    // See the first comment in DefinePropertyAccessor below.
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->CopyWithoutTransitions();
+ return AccessorPair::cast(obj)->Copy();
}
}
return GetHeap()->AllocateAccessorPair();
@@ -4460,10 +4723,34 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
Object* getter,
Object* setter,
PropertyAttributes attributes) {
- AccessorPair* accessors;
- { MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
- if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+ // We could assert that the property is configurable here, but we would need
+ // to do a lookup, which seems to be a bit of overkill.
+ Heap* heap = GetHeap();
+ bool only_attribute_changes = getter->IsNull() && setter->IsNull();
+ if (HasFastProperties() && !only_attribute_changes &&
+ (map()->NumberOfOwnDescriptors() <
+ DescriptorArray::kMaxNumberOfDescriptors)) {
+ MaybeObject* getterOk = heap->undefined_value();
+ if (!getter->IsNull()) {
+ getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
+ if (getterOk->IsFailure()) return getterOk;
+ }
+
+ MaybeObject* setterOk = heap->undefined_value();
+ if (getterOk != heap->null_value() && !setter->IsNull()) {
+ setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
+ if (setterOk->IsFailure()) return setterOk;
+ }
+
+ if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
+ return heap->undefined_value();
+ }
}
+
+ AccessorPair* accessors;
+ MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
+ if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+
accessors->SetComponents(getter, setter);
return SetPropertyCallback(name, accessors, attributes);
}
@@ -4474,14 +4761,14 @@ bool JSObject::CanSetCallback(String* name) {
GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
- // callback overwriting in this object or it's prototype chain.
+ // callback overwriting in this object or its prototype chain.
// This mechanism is needed for instance in a browser setting, where
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
LookupResult callback_result(GetIsolate());
- LookupCallback(name, &callback_result);
- if (callback_result.IsProperty()) {
+ LookupCallbackProperty(name, &callback_result);
+ if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
if (obj->IsAccessorInfo() &&
AccessorInfo::cast(obj)->prohibits_overwriting()) {
@@ -4535,17 +4822,17 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
Object* structure,
PropertyAttributes attributes) {
// Normalize object to make this operation simple.
- { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (maybe_ok->IsFailure()) return maybe_ok;
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
if (IsGlobalObject()) {
Map* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ ASSERT(new_map->is_dictionary_map());
+
set_map(new_map);
// When running crankshaft, changing the map is not enough. We
// need to deoptimize all functions that rely on this global
@@ -4555,9 +4842,8 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
// Update the dictionary with the new CALLBACKS property.
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
- { MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ maybe_ok = SetNormalizedProperty(name, structure, details);
+ if (maybe_ok->IsFailure()) return maybe_ok;
return GetHeap()->undefined_value();
}
@@ -4573,14 +4859,14 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
object->DefineAccessor(*name, *getter, *setter, attributes));
}
-MaybeObject* JSObject::DefineAccessor(String* name,
- Object* getter,
- Object* setter,
+MaybeObject* JSObject::DefineAccessor(String* name_raw,
+ Object* getter_raw,
+ Object* setter_raw,
PropertyAttributes attributes) {
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ !isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return isolate->heap()->undefined_value();
}
@@ -4590,7 +4876,7 @@ MaybeObject* JSObject::DefineAccessor(String* name,
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->DefineAccessor(
- name, getter, setter, attributes);
+ name_raw, getter_raw, setter_raw, attributes);
}
// Make sure that the top context does not change when doing callbacks or
@@ -4598,14 +4884,160 @@ MaybeObject* JSObject::DefineAccessor(String* name,
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
- name->TryFlatten();
+ name_raw->TryFlatten();
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+ if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
+
+ // From this point on everything needs to be handlified.
+ HandleScope scope(GetIsolate());
+ Handle<JSObject> self(this);
+ Handle<String> name(name_raw);
+ Handle<Object> getter(getter_raw);
+ Handle<Object> setter(setter_raw);
uint32_t index = 0;
- return name->AsArrayIndex(&index) ?
- DefineElementAccessor(index, getter, setter, attributes) :
- DefinePropertyAccessor(name, getter, setter, attributes);
+ bool is_element = name->AsArrayIndex(&index);
+
+ Handle<Object> old_value(isolate->heap()->the_hole_value());
+ bool preexists = false;
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ if (is_element) {
+ preexists = HasLocalElement(index);
+ if (preexists) {
+ // TODO(observe): distinguish the case where it's an accessor
+ old_value = Object::GetElement(self, index);
+ }
+ } else {
+ LookupResult lookup(isolate);
+ LocalLookup(*name, &lookup);
+ preexists = lookup.IsProperty();
+ if (preexists) old_value = handle(lookup.GetLazyValue());
+ }
+ }
+
+ MaybeObject* result = is_element ?
+ self->DefineElementAccessor(index, *getter, *setter, attributes) :
+ self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ const char* type = preexists ? "reconfigured" : "new";
+ EnqueueChangeRecord(self, type, name, old_value);
+ }
+
+ return *hresult;
+}
+
+
+static MaybeObject* TryAccessorTransition(JSObject* self,
+ Map* transitioned_map,
+ int target_descriptor,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes) {
+ DescriptorArray* descs = transitioned_map->instance_descriptors();
+ PropertyDetails details = descs->GetDetails(target_descriptor);
+
+ // If the transition target was not callbacks, fall back to the slow case.
+ if (details.type() != CALLBACKS) return self->GetHeap()->null_value();
+ Object* descriptor = descs->GetCallbacksObject(target_descriptor);
+ if (!descriptor->IsAccessorPair()) return self->GetHeap()->null_value();
+
+ Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
+ PropertyAttributes target_attributes = details.attributes();
+
+ // Reuse transition if adding same accessor with same attributes.
+ if (target_accessor == accessor && target_attributes == attributes) {
+ self->set_map(transitioned_map);
+ return self;
+ }
+
+ // If either not the same accessor, or not the same attributes, fall back to
+ // the slow case.
+ return self->GetHeap()->null_value();
+}
+
+
+MaybeObject* JSObject::DefineFastAccessor(String* name,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes) {
+ ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
+ LookupResult result(GetIsolate());
+ LocalLookup(name, &result);
+
+ if (result.IsFound()
+ && !result.IsPropertyCallbacks()
+ && !result.IsTransition()) return GetHeap()->null_value();
+
+  // Return success if the same accessor with the same attributes already exists.
+ AccessorPair* source_accessors = NULL;
+ if (result.IsPropertyCallbacks()) {
+ Object* callback_value = result.GetCallbackObject();
+ if (callback_value->IsAccessorPair()) {
+ source_accessors = AccessorPair::cast(callback_value);
+ Object* entry = source_accessors->get(component);
+ if (entry == accessor && result.GetAttributes() == attributes) {
+ return this;
+ }
+ } else {
+ return GetHeap()->null_value();
+ }
+
+ int descriptor_number = result.GetDescriptorIndex();
+
+ map()->LookupTransition(this, name, &result);
+
+ if (result.IsFound()) {
+ Map* target = result.GetTransitionTarget();
+ ASSERT(target->NumberOfOwnDescriptors() ==
+ map()->NumberOfOwnDescriptors());
+ // This works since descriptors are sorted in order of addition.
+ ASSERT(map()->instance_descriptors()->GetKey(descriptor_number) == name);
+ return TryAccessorTransition(
+ this, target, descriptor_number, component, accessor, attributes);
+ }
+ } else {
+ // If not, lookup a transition.
+ map()->LookupTransition(this, name, &result);
+
+ // If there is a transition, try to follow it.
+ if (result.IsFound()) {
+ Map* target = result.GetTransitionTarget();
+ int descriptor_number = target->LastAdded();
+ ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
+ ->Equals(name));
+ return TryAccessorTransition(
+ this, target, descriptor_number, component, accessor, attributes);
+ }
+ }
+
+  // If there is no transition yet, add a transition to a new accessor pair
+ // containing the accessor.
+ AccessorPair* accessors;
+ MaybeObject* maybe_accessors;
+
+ // Allocate a new pair if there were no source accessors. Otherwise, copy the
+ // pair and modify the accessor.
+ if (source_accessors != NULL) {
+ maybe_accessors = source_accessors->Copy();
+ } else {
+ maybe_accessors = GetHeap()->AllocateAccessorPair();
+ }
+ if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+ accessors->set(component, accessor);
+
+ CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
+
+ Map* new_map;
+ MaybeObject* maybe_new_map =
+ map()->CopyInsertDescriptor(&new_accessors_desc, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ set_map(new_map);
+ return this;
}
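

DefineFastAccessor only reuses an existing transition when the target holds the same accessor under the same attributes; anything else falls back to the slow path, signalled by returning null_value. A toy transition cache that applies the same reuse test; the keys and ids are invented for the sketch:

    #include <cassert>
    #include <map>
    #include <string>
    #include <tuple>

    // Key: (property name, getter-or-setter slot, attributes).
    using TransitionKey = std::tuple<std::string, int, int>;

    struct TransitionCache {
      std::map<TransitionKey, int> target_map_id;
      int next_id = 1;

      // Reuse the transition only if name, component, and attributes
      // all match, as TryAccessorTransition does; otherwise create one.
      int Define(const std::string& name, int component, int attributes) {
        TransitionKey key{name, component, attributes};
        auto it = target_map_id.find(key);
        if (it != target_map_id.end()) return it->second;  // Reused.
        return target_map_id[key] = next_id++;             // New map.
      }
    };

    int main() {
      TransitionCache cache;
      int a = cache.Define("x", /*getter*/ 0, /*attrs*/ 0);
      int b = cache.Define("x", 0, 0);  // Same triple: transition reused.
      int c = cache.Define("x", 0, 2);  // Different attributes: new map.
      assert(a == b && a != c);
      return 0;
    }
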
@@ -4633,9 +5065,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Try to flatten before operating on the string.
name->TryFlatten();
- if (!CanSetCallback(name)) {
- return isolate->heap()->undefined_value();
- }
+ if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
@@ -4645,9 +5075,12 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -4668,23 +5101,22 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
break;
}
- { MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+ MaybeObject* maybe_ok =
+ SetElementCallback(index, info, info->property_attributes());
+ if (maybe_ok->IsFailure()) return maybe_ok;
} else {
// Lookup the name.
LookupResult result(isolate);
LocalLookup(name, &result);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
- if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
+ if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
return isolate->heap()->undefined_value();
}
- { MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
- }
+
+ MaybeObject* maybe_ok =
+ SetPropertyCallback(name, info, info->property_attributes());
+ if (maybe_ok->IsFailure()) return maybe_ok;
}
return this;
@@ -4710,9 +5142,9 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
- if (js_object->HasDictionaryElements()) {
+ obj = JSReceiver::cast(obj)->GetPrototype()) {
+ if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(obj);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
@@ -4727,12 +5159,12 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
} else {
for (Object* obj = this;
obj != heap->null_value();
- obj = JSObject::cast(obj)->GetPrototype()) {
+ obj = JSReceiver::cast(obj)->GetPrototype()) {
LookupResult result(heap->isolate());
- JSObject::cast(obj)->LocalLookup(name, &result);
- if (result.IsProperty()) {
+ JSReceiver::cast(obj)->LocalLookup(name, &result);
+ if (result.IsFound()) {
if (result.IsReadOnly()) return heap->undefined_value();
- if (result.type() == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
return AccessorPair::cast(obj)->GetComponent(component);
@@ -4747,8 +5179,9 @@ Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
Object* JSObject::SlowReverseLookup(Object* value) {
if (HasFastProperties()) {
+ int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
return descs->GetKey(i);
@@ -4766,45 +5199,21 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
-MaybeObject* Map::CopyDropDescriptors() {
- Heap* heap = GetHeap();
- Object* result;
- { MaybeObject* maybe_result =
- heap->AllocateMap(instance_type(), instance_size());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
- // Don't copy descriptors, so map transitions always remain a forest.
- // If we retained the same descriptors we would have two maps
- // pointing to the same transition which is bad because the garbage
- // collector relies on being able to reverse pointers from transitions
- // to maps. If properties need to be retained use CopyDropTransitions.
- Map::cast(result)->clear_instance_descriptors();
- // Please note instance_type and instance_size are set when allocated.
- Map::cast(result)->set_inobject_properties(inobject_properties());
- Map::cast(result)->set_unused_property_fields(unused_property_fields());
-
- // If the map has pre-allocated properties always start out with a descriptor
- // array describing these properties.
- if (pre_allocated_property_fields() > 0) {
- ASSERT(constructor()->IsJSFunction());
- JSFunction* ctor = JSFunction::cast(constructor());
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- ctor->initial_map()->instance_descriptors()->RemoveTransitions();
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
- }
- Map::cast(result)->set_instance_descriptors(
- DescriptorArray::cast(descriptors));
- Map::cast(result)->set_pre_allocated_property_fields(
- pre_allocated_property_fields());
- }
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
- Map::cast(result)->set_bit_field3(bit_field3());
- Map::cast(result)->set_is_shared(false);
- Map::cast(result)->ClearCodeCache(heap);
+MaybeObject* Map::RawCopy(int instance_size) {
+ Map* result;
+ MaybeObject* maybe_result =
+ GetHeap()->AllocateMap(instance_type(), instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ result->set_prototype(prototype());
+ result->set_constructor(constructor());
+ result->set_bit_field(bit_field());
+ result->set_bit_field2(bit_field2());
+ int new_bit_field3 = bit_field3();
+ new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
+ new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
+ new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ result->set_bit_field3(new_bit_field3);
return result;
}
@@ -4816,49 +5225,351 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
new_instance_size -= inobject_properties() * kPointerSize;
}
- Object* result;
- { MaybeObject* maybe_result =
- GetHeap()->AllocateMap(instance_type(), new_instance_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Map* result;
+ MaybeObject* maybe_result = RawCopy(new_instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- Map::cast(result)->set_inobject_properties(inobject_properties());
+ result->set_inobject_properties(inobject_properties());
}
- Map::cast(result)->set_prototype(prototype());
- Map::cast(result)->set_constructor(constructor());
+ result->set_code_cache(code_cache());
+ result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+ result->set_dictionary_map(true);
- Map::cast(result)->set_bit_field(bit_field());
- Map::cast(result)->set_bit_field2(bit_field2());
- Map::cast(result)->set_bit_field3(bit_field3());
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap && result->is_shared()) {
+ result->SharedMapVerify();
+ }
+#endif
- Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+ return result;
+}
-#ifdef DEBUG
- if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
- Map::cast(result)->SharedMapVerify();
+
+MaybeObject* Map::CopyDropDescriptors() {
+ Map* result;
+ MaybeObject* maybe_result = RawCopy(instance_size());
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ // Please note instance_type and instance_size are set when allocated.
+ result->set_inobject_properties(inobject_properties());
+ result->set_unused_property_fields(unused_property_fields());
+
+ result->set_pre_allocated_property_fields(pre_allocated_property_fields());
+ result->set_is_shared(false);
+ result->ClearCodeCache(GetHeap());
+ return result;
+}
+
+
+MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
+ Descriptor* descriptor) {
+ // Sanity check. This path is only to be taken if the map owns its descriptor
+ // array, implying that its NumberOfOwnDescriptors equals the number of
+ // descriptors in the descriptor array.
+ ASSERT(NumberOfOwnDescriptors() ==
+ instance_descriptors()->number_of_descriptors());
+ Map* result;
+ MaybeObject* maybe_result = CopyDropDescriptors();
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ String* name = descriptor->GetKey();
+
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions =
+ AddTransition(name, result, SIMPLE_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+ int old_size = descriptors->number_of_descriptors();
+
+ DescriptorArray* new_descriptors;
+
+ if (descriptors->NumberOfSlackDescriptors() > 0) {
+ new_descriptors = descriptors;
+ new_descriptors->Append(descriptor);
+ } else {
+ // Descriptor arrays grow by 50%.
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
+ old_size, old_size < 4 ? 1 : old_size / 2);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ DescriptorArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, inserting a descriptor.
+ for (int i = 0; i < old_size; ++i) {
+ new_descriptors->CopyFrom(i, descriptors, i, witness);
+ }
+
+ new_descriptors->Append(descriptor, witness);
+
+ if (old_size > 0) {
+ // If the source descriptors had an enum cache we copy it. This ensures
+ // that the maps to which we push the new descriptor array back can rely
+ // on a cache always being available once it is set. If the map has more
+ // enumerated descriptors than available in the original cache, the cache
+ // will be lazily replaced by the extended cache when needed.
+ if (descriptors->HasEnumCache()) {
+ new_descriptors->CopyEnumCacheFrom(descriptors);
+ }
+
+ Map* map;
+ // Replace descriptors by new_descriptors in all maps that share it.
+ for (Object* current = GetBackPointer();
+ !current->IsUndefined();
+ current = map->GetBackPointer()) {
+ map = Map::cast(current);
+ if (map->instance_descriptors() != descriptors) break;
+ map->set_instance_descriptors(new_descriptors);
+ }
+
+ set_instance_descriptors(new_descriptors);
+ }
}
-#endif
+
+ result->SetBackPointer(this);
+ result->InitializeDescriptors(new_descriptors);
+ ASSERT(result->NumberOfOwnDescriptors() == NumberOfOwnDescriptors() + 1);
+
+ set_transitions(transitions);
+ set_owns_descriptors(false);
return result;
}
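

ShareDescriptor grows a full descriptor array by 50%, with a floor of one extra slot for arrays of fewer than four descriptors. The growth policy on its own:

    #include <cstdio>

    // Mirrors the slack computation above: arrays of fewer than four
    // descriptors gain one slot, larger ones grow by half.
    int SlackForSize(int old_size) {
      return old_size < 4 ? 1 : old_size / 2;
    }

    int main() {
      const int sizes[] = {0, 1, 3, 4, 8, 10};
      for (int size : sizes) {
        std::printf("size %2d -> capacity %2d\n",
                    size, size + SlackForSize(size));
      }
      return 0;
    }
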
-MaybeObject* Map::CopyDropTransitions() {
- Object* new_map;
- { MaybeObject* maybe_new_map = CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
+ String* name,
+ TransitionFlag flag,
+ int descriptor_index) {
+ ASSERT(descriptors->IsSortedNoDuplicates());
+
+ Map* result;
+ MaybeObject* maybe_result = CopyDropDescriptors();
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ result->InitializeDescriptors(descriptors);
+
+ if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
+ TransitionArray* transitions;
+ SimpleTransitionFlag simple_flag =
+ (descriptor_index == descriptors->number_of_descriptors() - 1)
+ ? SIMPLE_TRANSITION
+ : FULL_TRANSITION;
+ MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+ set_transitions(transitions);
+ result->SetBackPointer(this);
}
- Object* descriptors;
- { MaybeObject* maybe_descriptors =
- instance_descriptors()->RemoveTransitions();
- if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors;
+
+ return result;
+}
+
+
+MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
+ if (flag == INSERT_TRANSITION) {
+ ASSERT(!HasElementsTransition() ||
+ ((elements_transition_map()->elements_kind() == DICTIONARY_ELEMENTS ||
+ IsExternalArrayElementsKind(
+ elements_transition_map()->elements_kind())) &&
+ (kind == DICTIONARY_ELEMENTS ||
+ IsExternalArrayElementsKind(kind))));
+ ASSERT(!IsFastElementsKind(kind) ||
+ IsMoreGeneralElementsKindTransition(elements_kind(), kind));
+ ASSERT(kind != elements_kind());
+ }
+
+ bool insert_transition =
+ flag == INSERT_TRANSITION && !HasElementsTransition();
+
+ if (insert_transition && owns_descriptors()) {
+ // In case the map owned its own descriptors, share the descriptors and
+ // transfer ownership to the new map.
+ Map* new_map;
+ MaybeObject* maybe_new_map = CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ MaybeObject* added_elements = set_elements_transition_map(new_map);
+ if (added_elements->IsFailure()) return added_elements;
+
+ new_map->set_elements_kind(kind);
+ new_map->InitializeDescriptors(instance_descriptors());
+ new_map->SetBackPointer(this);
+ set_owns_descriptors(false);
+ return new_map;
}
- cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
+
+  // In case the map did not own its own descriptors, force a split by
+  // copying the map, which creates a new descriptor array cell.
+ // Create a new free-floating map only if we are not allowed to store it.
+ Map* new_map;
+ MaybeObject* maybe_new_map = Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
+ new_map->set_elements_kind(kind);
+
+ if (insert_transition) {
+ MaybeObject* added_elements = set_elements_transition_map(new_map);
+ if (added_elements->IsFailure()) return added_elements;
+ new_map->SetBackPointer(this);
+ }
+
return new_map;
}
+
+MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() {
+ if (pre_allocated_property_fields() == 0) return CopyDropDescriptors();
+
+ // If the map has pre-allocated properties always start out with a descriptor
+ // array describing these properties.
+ ASSERT(constructor()->IsJSFunction());
+ JSFunction* ctor = JSFunction::cast(constructor());
+ Map* map = ctor->initial_map();
+ DescriptorArray* descriptors = map->instance_descriptors();
+
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors =
+ descriptors->CopyUpTo(number_of_own_descriptors);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+}
+
+
+MaybeObject* Map::Copy() {
+ DescriptorArray* descriptors = instance_descriptors();
+ DescriptorArray* new_descriptors;
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ MaybeObject* maybe_descriptors =
+ descriptors->CopyUpTo(number_of_own_descriptors);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
+}
+
+
+MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
+ TransitionFlag flag) {
+ DescriptorArray* descriptors = instance_descriptors();
+
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ int old_size = NumberOfOwnDescriptors();
+ int new_size = old_size + 1;
+ descriptor->SetEnumerationIndex(new_size);
+
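+  // The descriptor array can only be shared with the new map when a
+  // transition is inserted, this map still owns its descriptors, and there
+  // is room for another transition.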
+ if (flag == INSERT_TRANSITION &&
+ owns_descriptors() &&
+ CanHaveMoreTransitions()) {
+ return ShareDescriptor(descriptors, descriptor);
+ }
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(old_size, 1);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ DescriptorArray::WhitenessWitness witness(new_descriptors);
+
+ // Copy the descriptors, inserting a descriptor.
+ for (int i = 0; i < old_size; ++i) {
+ new_descriptors->CopyFrom(i, descriptors, i, witness);
+ }
+
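+  // If this map owns only part of the source array, place the new descriptor
+  // explicitly and re-sort; otherwise it can simply be appended.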
+ if (old_size != descriptors->number_of_descriptors()) {
+ new_descriptors->SetNumberOfDescriptors(new_size);
+ new_descriptors->Set(old_size, descriptor, witness);
+ new_descriptors->Sort();
+ } else {
+ new_descriptors->Append(descriptor, witness);
+ }
+
+ String* key = descriptor->GetKey();
+ int insertion_index = new_descriptors->number_of_descriptors() - 1;
+
+ return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+}
+
+
+MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
+ TransitionFlag flag) {
+ DescriptorArray* old_descriptors = instance_descriptors();
+
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_result = descriptor->KeyToSymbol();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // We replace the key if it is already present.
+ int index = old_descriptors->SearchWithCache(descriptor->GetKey(), this);
+ if (index != DescriptorArray::kNotFound) {
+ return CopyReplaceDescriptor(old_descriptors, descriptor, index, flag);
+ }
+ return CopyAddDescriptor(descriptor, flag);
+}
+
+
+MaybeObject* DescriptorArray::CopyUpTo(int enumeration_index) {
+ if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
+
+ int size = enumeration_index;
+
+ DescriptorArray* descriptors;
+ MaybeObject* maybe_descriptors = Allocate(size);
+ if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
+ DescriptorArray::WhitenessWitness witness(descriptors);
+
+ for (int i = 0; i < size; ++i) {
+ descriptors->CopyFrom(i, this, i, witness);
+ }
+
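+  // Re-sort if descriptors were dropped from the tail of the source array.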
+ if (number_of_descriptors() != enumeration_index) descriptors->Sort();
+
+ return descriptors;
+}
+
+
+MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
+ Descriptor* descriptor,
+ int insertion_index,
+ TransitionFlag flag) {
+ // Ensure the key is a symbol.
+ MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
+ String* key = descriptor->GetKey();
+ ASSERT(key == descriptors->GetKey(insertion_index));
+
+ int new_size = NumberOfOwnDescriptors();
+ ASSERT(0 <= insertion_index && insertion_index < new_size);
+
+ PropertyDetails details = descriptors->GetDetails(insertion_index);
+ ASSERT_LE(details.descriptor_index(), new_size);
+ descriptor->SetEnumerationIndex(details.descriptor_index());
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ DescriptorArray::WhitenessWitness witness(new_descriptors);
+
+ for (int i = 0; i < new_size; ++i) {
+ if (i == insertion_index) {
+ new_descriptors->Set(i, descriptor, witness);
+ } else {
+ new_descriptors->CopyFrom(i, descriptors, i, witness);
+ }
+ }
+
+ // Re-sort if descriptors were removed.
+ if (new_size != descriptors->length()) new_descriptors->Sort();
+
+ return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index);
+}
+
+
void Map::UpdateCodeCache(Handle<Map> map,
Handle<String> name,
Handle<Code> code) {
@@ -4867,7 +5578,10 @@ void Map::UpdateCodeCache(Handle<Map> map,
map->UpdateCodeCache(*name, *code));
}
+
MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
+ ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
+
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
@@ -4913,78 +5627,43 @@ void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
// field of the contents array while it is running.
class IntrusiveMapTransitionIterator {
public:
- explicit IntrusiveMapTransitionIterator(DescriptorArray* descriptor_array)
- : descriptor_array_(descriptor_array) { }
+ explicit IntrusiveMapTransitionIterator(TransitionArray* transition_array)
+ : transition_array_(transition_array) { }
void Start() {
ASSERT(!IsIterating());
- if (HasContentArray()) *ContentHeader() = Smi::FromInt(0);
+ *TransitionArrayHeader() = Smi::FromInt(0);
}
bool IsIterating() {
- return HasContentArray() && (*ContentHeader())->IsSmi();
+ return (*TransitionArrayHeader())->IsSmi();
}
Map* Next() {
ASSERT(IsIterating());
- FixedArray* contents = ContentArray();
- // Attention, tricky index manipulation ahead: Every entry in the contents
- // array consists of a value/details pair, so the index is typically even.
- // An exception is made for CALLBACKS entries: An even index means we look
- // at its getter, and an odd index means we look at its setter.
- int index = Smi::cast(*ContentHeader())->value();
- while (index < contents->length()) {
- PropertyDetails details(Smi::cast(contents->get(index | 1)));
- switch (details.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
- // We definitely have a map transition.
- *ContentHeader() = Smi::FromInt(index + 2);
- return static_cast<Map*>(contents->get(index));
- case CALLBACKS: {
- // We might have a map transition in a getter or in a setter.
- AccessorPair* accessors =
- static_cast<AccessorPair*>(contents->get(index & ~1));
- Object* accessor =
- ((index & 1) == 0) ? accessors->getter() : accessors->setter();
- index++;
- if (accessor->IsMap()) {
- *ContentHeader() = Smi::FromInt(index);
- return static_cast<Map*>(accessor);
- }
- break;
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- // We definitely have no map transition.
- index += 2;
- break;
- }
+ int index = Smi::cast(*TransitionArrayHeader())->value();
+ int number_of_transitions = transition_array_->number_of_transitions();
+ while (index < number_of_transitions) {
+ *TransitionArrayHeader() = Smi::FromInt(index + 1);
+ return transition_array_->GetTarget(index);
}
- *ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map();
+
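+    // Once all map transitions have been visited, return the elements
+    // transition, if present.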
+ if (index == number_of_transitions &&
+ transition_array_->HasElementsTransition()) {
+ Map* elements_transition = transition_array_->elements_transition();
+ *TransitionArrayHeader() = Smi::FromInt(index + 1);
+ return elements_transition;
+ }
+ *TransitionArrayHeader() = transition_array_->GetHeap()->fixed_array_map();
return NULL;
}
private:
- bool HasContentArray() {
- return descriptor_array_-> length() > DescriptorArray::kContentArrayIndex;
- }
-
- FixedArray* ContentArray() {
- Object* array = descriptor_array_->get(DescriptorArray::kContentArrayIndex);
- return static_cast<FixedArray*>(array);
- }
-
- Object** ContentHeader() {
- return HeapObject::RawField(ContentArray(), DescriptorArray::kMapOffset);
+ Object** TransitionArrayHeader() {
+ return HeapObject::RawField(transition_array_, TransitionArray::kMapOffset);
}
- DescriptorArray* descriptor_array_;
+ TransitionArray* transition_array_;
};
@@ -4997,11 +5676,11 @@ class IntrusivePrototypeTransitionIterator {
void Start() {
ASSERT(!IsIterating());
- if (HasTransitions()) *Header() = Smi::FromInt(0);
+ *Header() = Smi::FromInt(0);
}
bool IsIterating() {
- return HasTransitions() && (*Header())->IsSmi();
+ return (*Header())->IsSmi();
}
Map* Next() {
@@ -5016,23 +5695,17 @@ class IntrusivePrototypeTransitionIterator {
}
private:
- bool HasTransitions() {
- return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
- }
-
Object** Header() {
return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
}
int NumberOfTransitions() {
- ASSERT(HasTransitions());
FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
return Smi::cast(num)->value();
}
Map* GetTransition(int transitionNumber) {
- ASSERT(HasTransitions());
FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
}
@@ -5085,25 +5758,40 @@ class TraversableMap : public Map {
// Start iterating over this map's children, possibly destroying a FixedArray
// map (see explanation above).
void ChildIteratorStart() {
- IntrusiveMapTransitionIterator(instance_descriptors()).Start();
- IntrusivePrototypeTransitionIterator(
- unchecked_prototype_transitions()).Start();
+ if (HasTransitionArray()) {
+ if (HasPrototypeTransitions()) {
+ IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start();
+ }
+
+ IntrusiveMapTransitionIterator(transitions()).Start();
+ }
}
// If we have an unvisited child map, return that one and advance. If we have
// none, return NULL and reset any destroyed FixedArray maps.
TraversableMap* ChildIteratorNext() {
- IntrusiveMapTransitionIterator descriptor_iterator(instance_descriptors());
- if (descriptor_iterator.IsIterating()) {
- Map* next = descriptor_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
+ TransitionArray* transition_array = unchecked_transition_array();
+ if (!transition_array->map()->IsSmi() &&
+ !transition_array->IsTransitionArray()) {
+ return NULL;
}
- IntrusivePrototypeTransitionIterator
- proto_iterator(unchecked_prototype_transitions());
- if (proto_iterator.IsIterating()) {
- Map* next = proto_iterator.Next();
+
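+    // Visit prototype transitions before map transitions.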
+ if (transition_array->HasPrototypeTransitions()) {
+ HeapObject* proto_transitions =
+ transition_array->UncheckedPrototypeTransitions();
+ IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions);
+ if (proto_iterator.IsIterating()) {
+ Map* next = proto_iterator.Next();
+ if (next != NULL) return static_cast<TraversableMap*>(next);
+ }
+ }
+
+ IntrusiveMapTransitionIterator transition_iterator(transition_array);
+ if (transition_iterator.IsIterating()) {
+ Map* next = transition_iterator.Next();
if (next != NULL) return static_cast<TraversableMap*>(next);
}
+
return NULL;
}
};
@@ -5134,7 +5822,7 @@ MaybeObject* CodeCache::Update(String* name, Code* code) {
// The number of monomorphic stubs for normal load/store/call IC's can grow to
// a large number and therefore they need to go into a hash table. They are
// used to load global properties from cells.
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
// Make sure that a hash table is allocated for the normal load code cache.
if (normal_type_cache()->IsUndefined()) {
Object* result;
@@ -5225,7 +5913,7 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
Object* CodeCache::Lookup(String* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == NORMAL) {
+ if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
return LookupNormalTypeCache(name, flags);
} else {
return LookupDefaultCache(name, flags);
@@ -5263,7 +5951,7 @@ Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
int CodeCache::GetIndex(Object* name, Code* code) {
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
if (normal_type_cache()->IsUndefined()) return -1;
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
return cache->GetIndex(String::cast(name), code->flags());
@@ -5279,7 +5967,7 @@ int CodeCache::GetIndex(Object* name, Code* code) {
void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
- if (code->type() == NORMAL) {
+ if (code->type() == Code::NORMAL) {
ASSERT(!normal_type_cache()->IsUndefined());
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
@@ -5597,7 +6285,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
MaybeObject* maybe_result =
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
- if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
+ if (!maybe_result->To(&result)) return maybe_result;
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
@@ -5653,234 +6341,80 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) {
+MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, int slack) {
Heap* heap = Isolate::Current()->heap();
- if (number_of_descriptors == 0) {
- return heap->empty_descriptor_array();
- }
- // Allocate the array of keys.
- Object* array;
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
// Do not use DescriptorArray::cast on incomplete object.
- FixedArray* result = FixedArray::cast(array);
+ int size = number_of_descriptors + slack;
+ if (size == 0) return heap->empty_descriptor_array();
+ FixedArray* result;
+ // Allocate the array of keys.
+ MaybeObject* maybe_array = heap->AllocateFixedArray(LengthFor(size));
+ if (!maybe_array->To(&result)) return maybe_array;
- // Allocate the content array and set it in the descriptor array.
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(number_of_descriptors << 1);
- if (!maybe_array->ToObject(&array)) return maybe_array;
- }
- result->set(kBitField3StorageIndex, Smi::FromInt(0));
- result->set(kContentArrayIndex, array);
- result->set(kEnumerationIndexIndex,
- Smi::FromInt(PropertyDetails::kInitialIndex));
+ result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
+ result->set(kEnumCacheIndex, Smi::FromInt(0));
return result;
}
+void DescriptorArray::ClearEnumCache() {
+ set(kEnumCacheIndex, Smi::FromInt(0));
+}
+
+
void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
FixedArray* new_cache,
Object* new_index_cache) {
ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
- if (HasEnumCache()) {
- FixedArray::cast(get(kEnumerationIndexIndex))->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(get(kEnumerationIndexIndex))->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- } else {
- if (IsEmpty()) return; // Do nothing for empty descriptor array.
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- NoWriteBarrierSet(FixedArray::cast(bridge_storage),
- kEnumCacheBridgeEnumIndex,
- get(kEnumerationIndexIndex));
- set(kEnumerationIndexIndex, bridge_storage);
- }
+ ASSERT(!IsEmpty());
+ ASSERT(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
+ FixedArray::cast(bridge_storage)->
+ set(kEnumCacheBridgeCacheIndex, new_cache);
+ FixedArray::cast(bridge_storage)->
+ set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
+ set(kEnumCacheIndex, bridge_storage);
}
-static bool InsertionPointFound(String* key1, String* key2) {
- return key1->Hash() > key2->Hash() || key1 == key2;
-}
-
-
-void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
- int dst_index,
- Handle<DescriptorArray> src,
+void DescriptorArray::CopyFrom(int dst_index,
+ DescriptorArray* src,
int src_index,
const WhitenessWitness& witness) {
- CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
- dst->CopyFrom(dst_index, *src, src_index, witness));
-}
-
-
-MaybeObject* DescriptorArray::CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness& witness) {
Object* value = src->GetValue(src_index);
PropertyDetails details = src->GetDetails(src_index);
- if (details.type() == CALLBACKS && value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
Descriptor desc(src->GetKey(src_index), value, details);
Set(dst_index, &desc, witness);
- return this;
-}
-
-
-MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag) {
- // Transitions are only kept when inserting another transition.
- // This precondition is not required by this function's implementation, but
- // is currently required by the semantics of maps, so we check it.
- // Conversely, we filter after replacing, so replacing a transition and
- // removing all other transitions is not supported.
- bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
- ASSERT(remove_transitions == !descriptor->ContainsTransition());
- ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
-
- // Ensure the key is a symbol.
- { MaybeObject* maybe_result = descriptor->KeyToSymbol();
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- int new_size = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsNullDescriptor(i)) continue;
- if (remove_transitions && IsTransitionOnly(i)) continue;
- new_size++;
- }
-
- // If key is in descriptor, we replace it in-place when filtering.
- // Count a null descriptor for key as inserted, not replaced.
- int index = Search(descriptor->GetKey());
- const bool replacing = (index != kNotFound);
- bool keep_enumeration_index = false;
- if (replacing) {
- // We are replacing an existing descriptor. We keep the enumeration
- // index of a visible property.
- PropertyType t = GetDetails(index).type();
- if (t == CONSTANT_FUNCTION ||
- t == FIELD ||
- t == CALLBACKS ||
- t == INTERCEPTOR) {
- keep_enumeration_index = true;
- } else if (remove_transitions) {
- // Replaced descriptor has been counted as removed if it is
- // a transition that will be replaced. Adjust count in this case.
- ++new_size;
- }
- } else {
- ++new_size;
- }
-
- DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(new_size);
- if (!maybe_result->To(&new_descriptors)) return maybe_result;
- }
-
- DescriptorArray::WhitenessWitness witness(new_descriptors);
-
- // Set the enumeration index in the descriptors and set the enumeration index
- // in the result.
- int enumeration_index = NextEnumerationIndex();
- if (!descriptor->ContainsTransition()) {
- if (keep_enumeration_index) {
- descriptor->SetEnumerationIndex(GetDetails(index).index());
- } else {
- descriptor->SetEnumerationIndex(enumeration_index);
- ++enumeration_index;
- }
- }
- new_descriptors->SetNextEnumerationIndex(enumeration_index);
-
- // Copy the descriptors, filtering out transitions and null descriptors,
- // and inserting or replacing a descriptor.
- int to_index = 0;
- int insertion_index = -1;
- int from_index = 0;
- while (from_index < number_of_descriptors()) {
- if (insertion_index < 0 &&
- InsertionPointFound(GetKey(from_index), descriptor->GetKey())) {
- insertion_index = to_index++;
- if (replacing) from_index++;
- } else {
- if (!(IsNullDescriptor(from_index) ||
- (remove_transitions && IsTransitionOnly(from_index)))) {
- MaybeObject* copy_result =
- new_descriptors->CopyFrom(to_index++, this, from_index, witness);
- if (copy_result->IsFailure()) return copy_result;
- }
- from_index++;
- }
- }
- if (insertion_index < 0) insertion_index = to_index++;
- new_descriptors->Set(insertion_index, descriptor, witness);
-
- ASSERT(to_index == new_descriptors->number_of_descriptors());
- SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
-
- return new_descriptors;
}
-MaybeObject* DescriptorArray::RemoveTransitions() {
- // Allocate the new descriptor array.
- int new_number_of_descriptors = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) new_number_of_descriptors++;
- }
- DescriptorArray* new_descriptors;
- { MaybeObject* maybe_result = Allocate(new_number_of_descriptors);
- if (!maybe_result->To(&new_descriptors)) return maybe_result;
- }
-
- // Copy the content.
- DescriptorArray::WhitenessWitness witness(new_descriptors);
- int next_descriptor = 0;
- for (int i = 0; i < number_of_descriptors(); i++) {
- if (IsProperty(i)) {
- MaybeObject* copy_result =
- new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
- if (copy_result->IsFailure()) return copy_result;
- }
- }
- ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
-
- return new_descriptors;
-}
-
-
-void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
+// We need the whiteness witness since sort will reshuffle the entries in the
+// descriptor array. If the descriptor array were to be black, the shuffling
+// would move a slot that was already recorded as pointing into an evacuation
+// candidate. This would result in missing updates upon evacuation.
+void DescriptorArray::Sort() {
// In-place heap sort.
int len = number_of_descriptors();
-
+ // Reset sorting since the descriptor array might contain invalid pointers.
+ for (int i = 0; i < len; ++i) SetSortedKey(i, i);
// Bottom-up max-heap construction.
// Index of the last node with children
const int max_parent_index = (len / 2) - 1;
for (int i = max_parent_index; i >= 0; --i) {
int parent_index = i;
- const uint32_t parent_hash = GetKey(i)->Hash();
+ const uint32_t parent_hash = GetSortedKey(i)->Hash();
while (parent_index <= max_parent_index) {
int child_index = 2 * parent_index + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index)->Hash();
if (child_index + 1 < len) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
}
}
if (child_hash <= parent_hash) break;
- NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+ SwapSortedKeys(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@@ -5889,95 +6423,45 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
- NoIncrementalWriteBarrierSwapDescriptors(0, i);
+ SwapSortedKeys(0, i);
// Shift down the new top element.
int parent_index = 0;
- const uint32_t parent_hash = GetKey(parent_index)->Hash();
+ const uint32_t parent_hash = GetSortedKey(parent_index)->Hash();
const int max_parent_index = (i / 2) - 1;
while (parent_index <= max_parent_index) {
int child_index = parent_index * 2 + 1;
- uint32_t child_hash = GetKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index)->Hash();
if (child_index + 1 < i) {
- uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
}
}
if (child_hash <= parent_hash) break;
- NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+ SwapSortedKeys(parent_index, child_index);
parent_index = child_index;
}
}
+ ASSERT(IsSortedNoDuplicates());
}
-void DescriptorArray::Sort(const WhitenessWitness& witness) {
- SortUnchecked(witness);
- SLOW_ASSERT(IsSortedNoDuplicates());
-}
-
-
-int DescriptorArray::BinarySearch(String* name, int low, int high) {
- uint32_t hash = name->Hash();
-
- while (low <= high) {
- int mid = (low + high) / 2;
- String* mid_name = GetKey(mid);
- uint32_t mid_hash = mid_name->Hash();
-
- if (mid_hash > hash) {
- high = mid - 1;
- continue;
- }
- if (mid_hash < hash) {
- low = mid + 1;
- continue;
- }
- // Found an element with the same hash-code.
- ASSERT(hash == mid_hash);
- // There might be more, so we find the first one and
- // check them all to see if we have a match.
- if (name == mid_name && !IsNullDescriptor(mid)) return mid;
- while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
- for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
- if (GetKey(mid)->Equals(name) && !IsNullDescriptor(mid)) return mid;
- }
- break;
- }
- return kNotFound;
-}
-
-
-int DescriptorArray::LinearSearch(String* name, int len) {
- uint32_t hash = name->Hash();
- for (int number = 0; number < len; number++) {
- String* entry = GetKey(number);
- if ((entry->Hash() == hash) &&
- name->Equals(entry) &&
- !IsNullDescriptor(number)) {
- return number;
- }
- }
- return kNotFound;
-}
-
-
-MaybeObject* AccessorPair::CopyWithoutTransitions() {
+MaybeObject* AccessorPair::Copy() {
Heap* heap = GetHeap();
AccessorPair* copy;
- { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
- if (!maybe_copy->To(&copy)) return maybe_copy;
- }
- copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
- copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
+ MaybeObject* maybe_copy = heap->AllocateAccessorPair();
+ if (!maybe_copy->To(&copy)) return maybe_copy;
+
+ copy->set_getter(getter());
+ copy->set_setter(setter());
return copy;
}
Object* AccessorPair::GetComponent(AccessorComponent component) {
- Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
- return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+ Object* accessor = get(component);
+ return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
}
@@ -6003,9 +6487,9 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
if (other->IsEmpty()) return false;
if (length() != other->length()) return false;
for (int i = 0; i < length(); ++i) {
- if (get(i) != other->get(i) && i != kContentArrayIndex) return false;
+ if (get(i) != other->get(i)) return false;
}
- return GetContentArray()->IsEqualTo(other->GetContentArray());
+ return true;
}
#endif
@@ -6037,7 +6521,7 @@ String::FlatContent String::GetFlatContent() {
ASSERT(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
- if (shape.encoding_tag() == kAsciiStringTag) {
+ if (shape.encoding_tag() == kOneByteStringTag) {
const char* start;
if (shape.representation_tag() == kSeqStringTag) {
start = SeqAsciiString::cast(string)->GetChars();
@@ -6726,7 +7210,7 @@ void String::WriteToFlat(String* src,
while (true) {
ASSERT(0 <= from && from <= to && to <= source->length());
switch (StringShape(source).full_representation_tag()) {
- case kAsciiStringTag | kExternalStringTag: {
+ case kOneByteStringTag | kExternalStringTag: {
CopyChars(sink,
ExternalAsciiString::cast(source)->GetChars() + from,
to - from);
@@ -6740,7 +7224,7 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kAsciiStringTag | kSeqStringTag: {
+ case kOneByteStringTag | kSeqStringTag: {
CopyChars(sink,
SeqAsciiString::cast(source)->GetChars() + from,
to - from);
@@ -6752,7 +7236,7 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kAsciiStringTag | kConsStringTag:
+ case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
String* first = cons_string->first();
@@ -6793,7 +7277,7 @@ void String::WriteToFlat(String* src,
}
break;
}
- case kAsciiStringTag | kSlicedStringTag:
+ case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString* slice = SlicedString::cast(source);
unsigned offset = slice->offset();
@@ -6883,72 +7367,6 @@ static inline bool CompareStringContentsPartial(Isolate* isolate,
}
-bool String::SlowEqualsExternal(uc16 *string, int length) {
- int len = this->length();
- if (len != length) return false;
- if (len == 0) return true;
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != string[0]) return false;
-
- String* lhs = this->TryFlattenGetString();
-
- if (lhs->IsFlat()) {
- String::FlatContent lhs_content = lhs->GetFlatContent();
- if (lhs->IsAsciiRepresentation()) {
- Vector<const char> vec1 = lhs_content.ToAsciiVector();
- VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(string, length);
- return CompareStringContents(&buf1, &ib);
- } else {
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- Vector<const uc16> vec2(string, length);
- return CompareRawStringContents(vec1, vec2);
- }
- } else {
- Isolate* isolate = GetIsolate();
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- VectorIterator<uc16> ib(string, length);
- return CompareStringContents(isolate->objects_string_compare_buffer_a(),
- &ib);
- }
-}
-
-
-bool String::SlowEqualsExternal(char *string, int length) {
- int len = this->length();
- if (len != length) return false;
- if (len == 0) return true;
-
- // We know the strings are both non-empty. Compare the first chars
- // before we try to flatten the strings.
- if (this->Get(0) != string[0]) return false;
-
- String* lhs = this->TryFlattenGetString();
-
- if (StringShape(lhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- return CompareRawStringContents(Vector<const char>(str1, len),
- Vector<const char>(string, len));
- }
-
- if (lhs->IsFlat()) {
- String::FlatContent lhs_content = lhs->GetFlatContent();
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- VectorIterator<const uc16> buf1(vec1);
- VectorIterator<char> buf2(string, length);
- return CompareStringContents(&buf1, &buf2);
- } else {
- Isolate* isolate = GetIsolate();
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- VectorIterator<char> ib(string, length);
- return CompareStringContents(isolate->objects_string_compare_buffer_a(),
- &ib);
- }
-}
-
-
bool String::SlowEquals(String* other) {
// Fast check: negative check with lengths.
int len = length();
@@ -7216,7 +7634,6 @@ void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
uint32_t StringHasher::GetHashField() {
- ASSERT(is_valid());
if (length_ <= String::kMaxHashCalcLength) {
if (is_array_index()) {
return MakeArrayIndexHash(array_index(), length_);
@@ -7271,89 +7688,116 @@ void String::PrintOn(FILE* file) {
}
+static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
+ int live_enum = map->EnumLength();
+ if (live_enum == Map::kInvalidEnumCache) {
+ live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+ }
+ if (live_enum == 0) return descriptors->ClearEnumCache();
+
+ FixedArray* enum_cache = descriptors->GetEnumCache();
+
+ int to_trim = enum_cache->length() - live_enum;
+ if (to_trim <= 0) return;
+ RightTrimFixedArray<FROM_GC>(heap, descriptors->GetEnumCache(), to_trim);
+
+ if (!descriptors->HasEnumIndicesCache()) return;
+ FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
+ RightTrimFixedArray<FROM_GC>(heap, enum_indices_cache, to_trim);
+}
+
+
+static void TrimDescriptorArray(Heap* heap,
+ Map* map,
+ DescriptorArray* descriptors,
+ int number_of_own_descriptors) {
+ int number_of_descriptors = descriptors->number_of_descriptors();
+ int to_trim = number_of_descriptors - number_of_own_descriptors;
+ if (to_trim <= 0) return;
+
+ RightTrimFixedArray<FROM_GC>(heap, descriptors, to_trim);
+ descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+
+ if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
+ descriptors->Sort();
+}
+
+
// Clear a possible back pointer in case the transition leads to a dead map.
// Return true in case a back pointer has been cleared and false otherwise.
-// Set *keep_entry to true when a live map transition has been found.
-static bool ClearBackPointer(Heap* heap, Object* target, bool* keep_entry) {
- if (!target->IsMap()) return false;
- Map* map = Map::cast(target);
- if (Marking::MarkBitFrom(map).Get()) {
- *keep_entry = true;
- return false;
- } else {
- map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
- return true;
- }
+static bool ClearBackPointer(Heap* heap, Map* target) {
+ if (Marking::MarkBitFrom(target).Get()) return false;
+ target->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
+ return true;
}
+// TODO(mstarzinger): This method should be moved into MarkCompactCollector,
+// because it cannot be called from outside the GC and we already have methods
+// depending on the transitions layout in the GC anyway.
void Map::ClearNonLiveTransitions(Heap* heap) {
- DescriptorArray* d = DescriptorArray::cast(
- *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
- if (d->IsEmpty()) return;
- Smi* NullDescriptorDetails =
- PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
- FixedArray* contents = FixedArray::cast(
- d->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->length() >= 2);
- for (int i = 0; i < contents->length(); i += 2) {
- // If the pair (value, details) is a map transition, check if the target is
- // live. If not, null the descriptor. Also drop the back pointer for that
- // map transition, so that this map is not reached again by following a back
- // pointer from that non-live map.
- bool keep_entry = false;
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- switch (details.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- ClearBackPointer(heap, contents->get(i), &keep_entry);
- break;
- case ELEMENTS_TRANSITION: {
- Object* object = contents->get(i);
- if (object->IsMap()) {
- ClearBackPointer(heap, object, &keep_entry);
- } else {
- FixedArray* array = FixedArray::cast(object);
- for (int j = 0; j < array->length(); ++j) {
- if (ClearBackPointer(heap, array->get(j), &keep_entry)) {
- array->set_undefined(j);
- }
- }
- }
- break;
+ // If there are no transitions to be cleared, return.
+  // TODO(verwaest): Should be an assert, otherwise back pointers are not
+ // properly cleared.
+ if (!HasTransitionArray()) return;
+
+ TransitionArray* t = transitions();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+
+ int transition_index = 0;
+
+ DescriptorArray* descriptors = instance_descriptors();
+ bool descriptors_owner_died = false;
+
+  // Compact all live transitions to the left.
+ for (int i = 0; i < t->number_of_transitions(); ++i) {
+ Map* target = t->GetTarget(i);
+ if (ClearBackPointer(heap, target)) {
+ if (target->instance_descriptors() == descriptors) {
+ descriptors_owner_died = true;
}
- case CALLBACKS: {
- Object* object = contents->get(i);
- if (object->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(object);
- if (ClearBackPointer(heap, accessors->getter(), &keep_entry)) {
- accessors->set_getter(heap->the_hole_value());
- }
- if (ClearBackPointer(heap, accessors->setter(), &keep_entry)) {
- accessors->set_setter(heap->the_hole_value());
- }
- } else {
- keep_entry = true;
- }
- break;
+ } else {
+ if (i != transition_index) {
+ String* key = t->GetKey(i);
+ t->SetKey(transition_index, key);
+ Object** key_slot = t->GetKeySlot(transition_index);
+ collector->RecordSlot(key_slot, key_slot, key);
+ // Target slots do not need to be recorded since maps are not compacted.
+ t->SetTarget(transition_index, t->GetTarget(i));
}
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- keep_entry = true;
- break;
+ transition_index++;
+ }
+ }
+
+ if (t->HasElementsTransition() &&
+ ClearBackPointer(heap, t->elements_transition())) {
+ if (t->elements_transition()->instance_descriptors() == descriptors) {
+ descriptors_owner_died = true;
}
- // Make sure that an entry containing only dead transitions gets collected.
- // What we *really* want to do here is removing this entry completely, but
- // for technical reasons we can't do this, so we zero it out instead.
- if (!keep_entry) {
- contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(heap, i);
+ t->ClearElementsTransition();
+ } else {
+ // If there are no transitions to be cleared, return.
+ // TODO(verwaest) Should be an assert, otherwise back pointers are not
+ // properly cleared.
+ if (transition_index == t->number_of_transitions()) return;
+ }
+
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+
+ if (descriptors_owner_died) {
+ if (number_of_own_descriptors > 0) {
+ TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
+ ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ } else {
+ ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
}
+
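+  // Trim the transition array down to the live transitions that were
+  // compacted to the left above.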
+ int trim = t->number_of_transitions() - transition_index;
+ if (trim > 0) {
+ RightTrimFixedArray<FROM_GC>(heap, t, t->IsSimpleTransition()
+ ? trim : trim * TransitionArray::kTransitionSize);
+ }
}
@@ -7386,8 +7830,8 @@ bool Map::EquivalentToForNormalization(Map* other,
instance_type() == other->instance_type() &&
bit_field() == other->bit_field() &&
bit_field2() == other->bit_field2() &&
- (bit_field3() & ~(1<<Map::kIsShared)) ==
- (other->bit_field3() & ~(1<<Map::kIsShared));
+ is_observed() == other->is_observed() &&
+ function_with_prototype() == other->function_with_prototype();
}
@@ -7408,13 +7852,19 @@ void JSFunction::MarkForLazyRecompilation() {
ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
+void JSFunction::MarkForParallelRecompilation() {
+ ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ Builtins* builtins = GetIsolate()->builtins();
+ ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));
-bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag) {
- return shared->is_compiled() || CompileLazy(shared, flag);
+ // Unlike MarkForLazyRecompilation, after queuing a function for
+ // recompilation on the compiler thread, we actually tail-call into
+ // the full code. We reset the profiler ticks here so that the
+ // function doesn't bother the runtime profiler too much.
+ shared()->code()->set_profiler_ticks(0);
}
-
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
@@ -7431,11 +7881,78 @@ static bool CompileLazyHelper(CompilationInfo* info,
bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) {
- CompilationInfo info(shared);
+ ASSERT(shared->allows_lazy_compilation_without_context());
+ CompilationInfoWithZone info(shared);
return CompileLazyHelper(&info, flag);
}
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
+
+void SharedFunctionInfo::AddToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<FixedArray> literals) {
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(native_context->IsNativeContext());
+ STATIC_ASSERT(kEntryLength == 3);
+ Object* value = shared->optimized_code_map();
+ Handle<FixedArray> new_code_map;
+ if (value->IsSmi()) {
+ // No optimized code map.
+ ASSERT_EQ(0, Smi::cast(value)->value());
+    // Create 3 entries per context {context, code, literals}.
+ new_code_map = FACTORY->NewFixedArray(kEntryLength);
+ new_code_map->set(0, *native_context);
+ new_code_map->set(1, *code);
+ new_code_map->set(2, *literals);
+ } else {
+ // Copy old map and append one new entry.
+ Handle<FixedArray> old_code_map(FixedArray::cast(value));
+ ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+ int old_length = old_code_map->length();
+ int new_length = old_length + kEntryLength;
+ new_code_map = FACTORY->NewFixedArray(new_length);
+ old_code_map->CopyTo(0, *new_code_map, 0, old_length);
+ new_code_map->set(old_length, *native_context);
+ new_code_map->set(old_length + 1, *code);
+ new_code_map->set(old_length + 2, *literals);
+ }
+#ifdef DEBUG
+ for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+ ASSERT(new_code_map->get(i)->IsNativeContext());
+ ASSERT(new_code_map->get(i + 1)->IsCode());
+ ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+ Code::OPTIMIZED_FUNCTION);
+ ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+ }
+#endif
+ shared->set_optimized_code_map(*new_code_map);
+}
+
+
+void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
+ int index) {
+ ASSERT(index > 0);
+ ASSERT(optimized_code_map()->IsFixedArray());
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
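+  // The code map stores {context, code, literals} triplets; 'index' points
+  // at the code slot of the matching triplet.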
+ if (!bound()) {
+ FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
+ ASSERT(cached_literals != NULL);
+ function->set_literals(cached_literals);
+ }
+ Code* code = Code::cast(code_map->get(index));
+ ASSERT(code != NULL);
+ ASSERT(function->context()->native_context() == code_map->get(index - 1));
+ function->ReplaceCode(code);
+ code->MakeYoung();
+}
+
+
bool JSFunction::CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
@@ -7443,7 +7960,8 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
- CompilationInfo info(function);
+ ASSERT(function->shared()->allows_lazy_compilation());
+ CompilationInfoWithZone info(function);
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
@@ -7452,14 +7970,20 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
+ BailoutId osr_ast_id,
ClearExceptionFlag flag) {
- CompilationInfo info(function);
+ CompilationInfoWithZone info(function);
info.SetOptimizing(osr_ast_id);
return CompileLazyHelper(&info, flag);
}
+bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ return function->is_compiled() || CompileLazy(function, flag);
+}
+
+
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@@ -7474,19 +7998,86 @@ bool JSFunction::IsInlineable() {
}
+MaybeObject* JSObject::OptimizeAsPrototype() {
+ if (IsGlobalObject()) return this;
+
+ // Make sure prototypes are fast objects and their maps have the bit set
+ // so they remain fast.
+ if (!HasFastProperties()) {
+ MaybeObject* new_proto = TransformToFastProperties(0);
+ if (new_proto->IsFailure()) return new_proto;
+ ASSERT(new_proto == this);
+ }
+ return this;
+}
+
+
+MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
+ Context* native_context, Map* initial_map) {
+ // Replace all of the cached initial array maps in the native context with
+ // the appropriate transitioned elements kind maps.
+ Heap* heap = native_context->GetHeap();
+ MaybeObject* maybe_maps =
+ heap->AllocateFixedArrayWithHoles(kElementsKindCount);
+ FixedArray* maps;
+ if (!maybe_maps->To(&maps)) return maybe_maps;
+
+ Map* current_map = initial_map;
+ ElementsKind kind = current_map->elements_kind();
+ ASSERT(kind == GetInitialFastElementsKind());
+ maps->set(kind, current_map);
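+  // Walk the remaining fast elements kinds in order, creating a transitioned
+  // map for each and caching it.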
+ for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
+ i < kFastElementsKindCount; ++i) {
+ Map* new_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
+ MaybeObject* maybe_new_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ maps->set(next_kind, new_map);
+ current_map = new_map;
+ }
+ native_context->set_js_array_maps(maps);
+ return initial_map;
+}
+
+
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSReceiver());
Heap* heap = GetHeap();
+
+ // First some logic for the map of the prototype to make sure it is in fast
+ // mode.
+ if (value->IsJSObject()) {
+ MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
+ if (ok->IsFailure()) return ok;
+ }
+
+ // Now some logic for the maps of the objects that are created by using this
+ // function as a constructor.
if (has_initial_map()) {
- // If the function has allocated the initial map
- // replace it with a copy containing the new prototype.
+ // If the function has allocated the initial map replace it with a
+ // copy containing the new prototype. Also complete any in-object
+ // slack tracking that is in progress at this point because it is
+ // still tracking the old copy.
+ if (shared()->IsInobjectSlackTrackingInProgress()) {
+ shared()->CompleteInobjectSlackTracking();
+ }
Map* new_map;
- MaybeObject* maybe_new_map = initial_map()->CopyDropTransitions();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ MaybeObject* maybe_object = initial_map()->Copy();
+ if (!maybe_object->To(&new_map)) return maybe_object;
new_map->set_prototype(value);
- MaybeObject* maybe_object =
- set_initial_map_and_cache_transitions(new_map);
- if (maybe_object->IsFailure()) return maybe_object;
+
+ // If the function is used as the global Array function, cache the
+ // initial map (and transitioned versions) in the native context.
+ Context* native_context = context()->native_context();
+ Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
+ if (array_function->IsJSFunction() &&
+ this == JSFunction::cast(array_function)) {
+ MaybeObject* ok = CacheInitialJSArrayMaps(native_context, new_map);
+ if (ok->IsFailure()) return ok;
+ }
+
+ set_initial_map(new_map);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
@@ -7511,15 +8102,15 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
// Remove map transitions because they point to maps with a
// different prototype.
Map* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map()->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
Heap* heap = new_map->GetHeap();
set_map(new_map);
new_map->set_constructor(value);
new_map->set_non_instance_prototype(true);
construct_prototype =
- heap->isolate()->context()->global_context()->
+ heap->isolate()->context()->native_context()->
initial_object_prototype();
} else {
map()->set_non_instance_prototype(false);
@@ -7529,41 +8120,36 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
}
-Object* JSFunction::RemovePrototype() {
- Context* global_context = context()->global_context();
+void JSFunction::RemovePrototype() {
+ Context* native_context = context()->native_context();
Map* no_prototype_map = shared()->is_classic_mode()
- ? global_context->function_without_prototype_map()
- : global_context->strict_mode_function_without_prototype_map();
+ ? native_context->function_without_prototype_map()
+ : native_context->strict_mode_function_without_prototype_map();
- if (map() == no_prototype_map) {
- // Be idempotent.
- return this;
- }
+ if (map() == no_prototype_map) return;
ASSERT(map() == (shared()->is_classic_mode()
- ? global_context->function_map()
- : global_context->strict_mode_function_map()));
+ ? native_context->function_map()
+ : native_context->strict_mode_function_map()));
set_map(no_prototype_map);
set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
- return this;
}
-Object* JSFunction::SetInstanceClassName(String* name) {
+void JSFunction::SetInstanceClassName(String* name) {
shared()->set_instance_class_name(name);
- return this;
}
void JSFunction::PrintName(FILE* out) {
SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
- PrintF(out, "%s", *name);
+ FPrintF(out, "%s", *name);
}
-Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
- return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
+Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
+ return Context::cast(literals->get(JSFunction::kLiteralNativeContextIndex));
}
@@ -7631,26 +8217,33 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
return false;
}
- // If the prototype is null inline constructors cause no problems.
- if (!prototype->IsJSObject()) {
- ASSERT(prototype->IsNull());
- return true;
- }
-
Heap* heap = GetHeap();
- // Traverse the proposed prototype chain looking for setters for properties of
- // the same names as are set by the inline constructor.
+ // Traverse the proposed prototype chain looking for properties of the
+ // same names as are set by the inline constructor.
for (Object* obj = prototype;
obj != heap->null_value();
obj = obj->GetPrototype()) {
- JSObject* js_object = JSObject::cast(obj);
+ JSReceiver* receiver = JSReceiver::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
LookupResult result(heap->isolate());
String* name = GetThisPropertyAssignmentName(i);
- js_object->LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound() && result.type() == CALLBACKS) {
- return false;
+ receiver->LocalLookup(name, &result);
+ if (result.IsFound()) {
+ switch (result.type()) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ break;
+ case INTERCEPTOR:
+ case CALLBACKS:
+ case HANDLER:
+ return false;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
}
}
}
@@ -7795,7 +8388,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
}
-void SharedFunctionInfo::DisableOptimization() {
+void SharedFunctionInfo::DisableOptimization(const char* reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
  // is there because we flush non-optimized code thereby losing the
@@ -7811,13 +8404,14 @@ void SharedFunctionInfo::DisableOptimization() {
code()->set_optimizable(false);
}
if (FLAG_trace_opt) {
- PrintF("[disabled optimization for %s]\n", *DebugName()->ToCString());
+ PrintF("[disabled optimization for %s, reason: %s]\n",
+ *DebugName()->ToCString(), reason);
}
}
-bool SharedFunctionInfo::VerifyBailoutId(int id) {
- ASSERT(id != AstNode::kNoNumber);
+bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
+ ASSERT(!id.IsNone());
Code* unoptimized = code();
DeoptimizationOutputData* data =
DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
@@ -7859,8 +8453,7 @@ void SharedFunctionInfo::DetachInitialMap() {
Map* map = reinterpret_cast<Map*>(initial_map());
// Make the map remember to restore the link if it survives the GC.
- map->set_bit_field3(
- map->bit_field3() | (1 << Map::kAttachedToSharedFunctionInfo));
+ map->set_attached_to_shared_function_info(true);
// Undo state changes made by StartInobjectTracking (except the
// construction_count). This way if the initial map does not survive the GC
@@ -7868,7 +8461,7 @@ void SharedFunctionInfo::DetachInitialMap() {
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
Heap* heap = map->GetHeap();
- set_initial_map(heap->raw_unchecked_undefined_value());
+ set_initial_map(heap->undefined_value());
Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
@@ -7880,8 +8473,7 @@ void SharedFunctionInfo::DetachInitialMap() {
// Called from GC, hence reinterpret_cast and unchecked accessors.
void SharedFunctionInfo::AttachInitialMap(Map* map) {
- map->set_bit_field3(
- map->bit_field3() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+ map->set_attached_to_shared_function_info(false);
// Resume inobject slack tracking.
set_initial_map(map);
@@ -7900,12 +8492,13 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
if (code()->kind() == Code::FUNCTION) {
code()->set_profiler_ticks(0);
if (optimization_disabled() &&
- opt_count() >= Compiler::kDefaultMaxOptCount) {
+ opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
set_optimization_disabled(false);
code()->set_optimizable(true);
}
set_opt_count(0);
+ set_deopt_count(0);
}
}
@@ -7953,9 +8546,20 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
-void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- v->VisitSharedFunctionInfo(this);
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+ ASSERT(native_context->IsNativeContext());
+ if (!FLAG_cache_optimized_code) return -1;
+ Object* value = optimized_code_map();
+ if (!value->IsSmi()) {
+ FixedArray* optimized_code_map = FixedArray::cast(value);
+ int length = optimized_code_map->length();
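+    // Entries are {context, code, literals} triplets; return the index of
+    // the code slot for the matching context.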
+ for (int i = 0; i < length; i += 3) {
+ if (optimized_code_map->get(i) == native_context) {
+ return i + 1;
+ }
+ }
+ }
+ return -1;
}
@@ -7984,6 +8588,15 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
}
+void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Object* stub = rinfo->code_age_stub();
+ if (stub) {
+ VisitPointer(&stub);
+ }
+}
+
+
void ObjectVisitor::VisitCodeEntry(Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
@@ -8183,7 +8796,6 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
TypeFeedbackCells* type_feedback_cells =
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- ASSERT(type_feedback_cells->AstId(i)->IsSmi());
JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
}
@@ -8191,25 +8803,124 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
}
+bool Code::allowed_in_shared_map_code_cache() {
+ return is_keyed_load_stub() || is_keyed_store_stub() ||
+ (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
+}
+
+
+void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
+ PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+}
+
+
+void Code::MakeYoung() {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence != NULL) {
+ PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+ }
+}
+
+
+void Code::MakeOlder(MarkingParity current_parity) {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence != NULL) {
+ Age age;
+ MarkingParity code_parity;
+ GetCodeAgeAndParity(sequence, &age, &code_parity);
+ if (age != kLastCodeAge && code_parity != current_parity) {
+ PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
+ current_parity);
+ }
+ }
+}
+
+
+bool Code::IsOld() {
+ byte* sequence = FindCodeAgeSequence();
+ if (sequence == NULL) return false;
+ Age age;
+ MarkingParity parity;
+ GetCodeAgeAndParity(sequence, &age, &parity);
+ return age >= kSexagenarianCodeAge;
+}
+
+
+byte* Code::FindCodeAgeSequence() {
+ return FLAG_age_code &&
+ strlen(FLAG_stop_at) == 0 &&
+ !ProfileEntryHookStub::HasEntryHook() &&
+ (kind() == OPTIMIZED_FUNCTION ||
+ (kind() == FUNCTION && !has_debug_break_slots()))
+ ? FindPlatformCodeAgeSequence()
+ : NULL;
+}
+
+
+void Code::GetCodeAgeAndParity(Code* code, Age* age,
+ MarkingParity* parity) {
+ Isolate* isolate = Isolate::Current();
+ Builtins* builtins = isolate->builtins();
+ Code* stub = NULL;
+#define HANDLE_CODE_AGE(AGE) \
+ stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \
+ if (code == stub) { \
+ *age = k##AGE##CodeAge; \
+ *parity = EVEN_MARKING_PARITY; \
+ return; \
+ } \
+ stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
+ if (code == stub) { \
+ *age = k##AGE##CodeAge; \
+ *parity = ODD_MARKING_PARITY; \
+ return; \
+ }
+ CODE_AGE_LIST(HANDLE_CODE_AGE)
+#undef HANDLE_CODE_AGE
+ UNREACHABLE();
+}
+
+
+Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
+ Isolate* isolate = Isolate::Current();
+ Builtins* builtins = isolate->builtins();
+ switch (age) {
+#define HANDLE_CODE_AGE(AGE) \
+ case k##AGE##CodeAge: { \
+ Code* stub = parity == EVEN_MARKING_PARITY \
+ ? *builtins->Make##AGE##CodeYoungAgainEvenMarking() \
+ : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
+ return stub; \
+ }
+ CODE_AGE_LIST(HANDLE_CODE_AGE)
+#undef HANDLE_CODE_AGE
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return NULL;
+}
+
+
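
Taken together, the aging helpers above implement a simple rule: a code object's age advances at most once per GC cycle, because MakeOlder only bumps it when the recorded marking parity differs from the current one, and IsOld fires once the age reaches kSexagenarianCodeAge. A minimal standalone model of that rule; the age values and threshold are illustrative stand-ins, not the real constants:

    enum Parity { NO_PARITY, EVEN, ODD };

    struct AgedCode {
      int age;        // 0 plays the role of kNoAge
      Parity parity;  // parity recorded at the last age bump
    };

    // Advance at most one step per marking cycle, as in Code::MakeOlder().
    void MakeOlder(AgedCode* code, Parity current, int last_age) {
      if (code->age != last_age && code->parity != current) {
        code->age++;
        code->parity = current;
      }
    }

    bool IsOld(const AgedCode& code, int old_threshold) {
      return code.age >= old_threshold;  // kSexagenarianCodeAge in the patch
    }
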
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
disasm::NameConverter converter;
int deopt_count = DeoptCount();
- PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
+ FPrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
if (0 == deopt_count) return;
- PrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
+ FPrintF(out, "%6s %6s %6s %6s %12s\n", "index", "ast id", "argc", "pc",
FLAG_print_code_verbose ? "commands" : "");
for (int i = 0; i < deopt_count; i++) {
- PrintF(out, "%6d %6d %6d %6d",
+ FPrintF(out, "%6d %6d %6d %6d",
i,
- AstId(i)->value(),
+ AstId(i).ToInt(),
ArgumentsStackHeight(i)->value(),
Pc(i)->value());
if (!FLAG_print_code_verbose) {
- PrintF(out, "\n");
+ FPrintF(out, "\n");
continue;
}
// Print details of the frame translation.
@@ -8220,7 +8931,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
ASSERT(Translation::BEGIN == opcode);
int frame_count = iterator.Next();
int jsframe_count = iterator.Next();
- PrintF(out, " %s {frame count=%d, js frame count=%d}\n",
+ FPrintF(out, " %s {frame count=%d, js frame count=%d}\n",
Translation::StringFor(opcode),
frame_count,
jsframe_count);
@@ -8228,7 +8939,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
while (iterator.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- PrintF(out, "%24s %s ", "", Translation::StringFor(opcode));
+ FPrintF(out, "%24s %s ", "", Translation::StringFor(opcode));
switch (opcode) {
case Translation::BEGIN:
@@ -8238,12 +8949,15 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
case Translation::JS_FRAME: {
int ast_id = iterator.Next();
int function_id = iterator.Next();
- JSFunction* function =
- JSFunction::cast(LiteralArray()->get(function_id));
unsigned height = iterator.Next();
- PrintF(out, "{ast_id=%d, function=", ast_id);
- function->PrintName(out);
- PrintF(out, ", height=%u}", height);
+ FPrintF(out, "{ast_id=%d, function=", ast_id);
+ if (function_id != Translation::kSelfLiteralId) {
+ Object* function = LiteralArray()->get(function_id);
+ JSFunction::cast(function)->PrintName(out);
+ } else {
+ FPrintF(out, "<self>");
+ }
+ FPrintF(out, ", height=%u}", height);
break;
}
@@ -8253,9 +8967,20 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
JSFunction* function =
JSFunction::cast(LiteralArray()->get(function_id));
unsigned height = iterator.Next();
- PrintF(out, "{function=");
+ FPrintF(out, "{function=");
+ function->PrintName(out);
+ FPrintF(out, ", height=%u}", height);
+ break;
+ }
+
+ case Translation::GETTER_STUB_FRAME:
+ case Translation::SETTER_STUB_FRAME: {
+ int function_id = iterator.Next();
+ JSFunction* function =
+ JSFunction::cast(LiteralArray()->get(function_id));
+ FPrintF(out, "{function=");
function->PrintName(out);
- PrintF(out, ", height=%u}", height);
+ FPrintF(out, "}");
break;
}
@@ -8264,58 +8989,72 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
case Translation::REGISTER: {
int reg_code = iterator.Next();
- PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+ FPrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
break;
}
case Translation::INT32_REGISTER: {
int reg_code = iterator.Next();
- PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+ FPrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+ break;
+ }
+
+ case Translation::UINT32_REGISTER: {
+ int reg_code = iterator.Next();
+ FPrintF(out,
+ "{input=%s (unsigned)}",
+ converter.NameOfCPURegister(reg_code));
break;
}
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
- PrintF(out, "{input=%s}",
+ FPrintF(out, "{input=%s}",
DoubleRegister::AllocationIndexToString(reg_code));
break;
}
case Translation::STACK_SLOT: {
int input_slot_index = iterator.Next();
- PrintF(out, "{input=%d}", input_slot_index);
+ FPrintF(out, "{input=%d}", input_slot_index);
break;
}
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator.Next();
- PrintF(out, "{input=%d}", input_slot_index);
+ FPrintF(out, "{input=%d}", input_slot_index);
+ break;
+ }
+
+ case Translation::UINT32_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ FPrintF(out, "{input=%d (unsigned)}", input_slot_index);
break;
}
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator.Next();
- PrintF(out, "{input=%d}", input_slot_index);
+ FPrintF(out, "{input=%d}", input_slot_index);
break;
}
case Translation::LITERAL: {
unsigned literal_index = iterator.Next();
- PrintF(out, "{literal_id=%u}", literal_index);
+ FPrintF(out, "{literal_id=%u}", literal_index);
break;
}
case Translation::ARGUMENTS_OBJECT:
break;
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
}
}
void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
- PrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
+ FPrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
this->DeoptPoints());
if (this->DeoptPoints() == 0) return;
@@ -8323,7 +9062,7 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
for (int i = 0; i < this->DeoptPoints(); i++) {
int pc_and_state = this->PcAndState(i)->value();
PrintF("%6d %8d %s\n",
- this->AstId(i)->value(),
+ this->AstId(i).ToInt(),
FullCodeGenerator::PcField::decode(pc_and_state),
FullCodeGenerator::State2String(
FullCodeGenerator::StateField::decode(pc_and_state)));
@@ -8369,18 +9108,15 @@ const char* Code::ICState2String(InlineCacheState state) {
}
-const char* Code::PropertyType2String(PropertyType type) {
+const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
case FIELD: return "FIELD";
case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
case CALLBACKS: return "CALLBACKS";
- case HANDLER: return "HANDLER";
case INTERCEPTOR: return "INTERCEPTOR";
case MAP_TRANSITION: return "MAP_TRANSITION";
- case ELEMENTS_TRANSITION: return "ELEMENTS_TRANSITION";
- case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
- case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
+ case NONEXISTENT: return "NONEXISTENT";
}
UNREACHABLE(); // keep the compiler happy
return NULL;
@@ -8405,43 +9141,43 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
break;
}
if (name != NULL) {
- PrintF(out, "extra_ic_state = %s\n", name);
+ FPrintF(out, "extra_ic_state = %s\n", name);
} else {
- PrintF(out, "extra_ic_state = %d\n", extra);
+ FPrintF(out, "extra_ic_state = %d\n", extra);
}
}
void Code::Disassemble(const char* name, FILE* out) {
- PrintF(out, "kind = %s\n", Kind2String(kind()));
+ FPrintF(out, "kind = %s\n", Kind2String(kind()));
if (is_inline_cache_stub()) {
- PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
+ FPrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
- PrintF(out, "type = %s\n", PropertyType2String(type()));
+ FPrintF(out, "type = %s\n", StubType2String(type()));
}
if (is_call_stub() || is_keyed_call_stub()) {
- PrintF(out, "argc = %d\n", arguments_count());
+ FPrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
CompareIC::State state = CompareIC::ComputeState(this);
- PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
+ FPrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
}
if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
Token::Value op = CompareIC::ComputeOperation(this);
- PrintF(out, "compare_operation = %s\n", Token::Name(op));
+ FPrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
if ((name != NULL) && (name[0] != '\0')) {
- PrintF(out, "name = %s\n", name);
+ FPrintF(out, "name = %s\n", name);
}
if (kind() == OPTIMIZED_FUNCTION) {
- PrintF(out, "stack_slots = %d\n", stack_slots());
+ FPrintF(out, "stack_slots = %d\n", stack_slots());
}
- PrintF(out, "Instructions (size = %d)\n", instruction_size());
+ FPrintF(out, "Instructions (size = %d)\n", instruction_size());
Disassembler::Decode(out, this);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
if (kind() == FUNCTION) {
DeoptimizationOutputData* data =
@@ -8456,24 +9192,26 @@ void Code::Disassemble(const char* name, FILE* out) {
if (kind() == OPTIMIZED_FUNCTION) {
SafepointTable table(this);
- PrintF(out, "Safepoints (size = %u)\n", table.size());
+ FPrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
- PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
+ FPrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
table.PrintEntry(i);
- PrintF(out, " (sp -> fp)");
+ FPrintF(out, " (sp -> fp)");
SafepointEntry entry = table.GetEntry(i);
if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- PrintF(out, " %6d", entry.deoptimization_index());
+ FPrintF(out, " %6d", entry.deoptimization_index());
} else {
- PrintF(out, " <none>");
+ FPrintF(out, " <none>");
}
if (entry.argument_count() > 0) {
- PrintF(out, " argc: %d", entry.argument_count());
+ FPrintF(out, " argc: %d", entry.argument_count());
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
+ // Just print if type feedback info is ever used for optimized code.
+ ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will be at or after
@@ -8482,19 +9220,25 @@ void Code::Disassemble(const char* name, FILE* out) {
unsigned* address =
reinterpret_cast<unsigned*>(instruction_start() + offset);
unsigned length = address[0];
- PrintF(out, "Stack checks (size = %u)\n", length);
- PrintF(out, "ast_id pc_offset\n");
+ FPrintF(out, "Stack checks (size = %u)\n", length);
+ FPrintF(out, "ast_id pc_offset\n");
for (unsigned i = 0; i < length; ++i) {
unsigned index = (2 * i) + 1;
- PrintF(out, "%6u %9u\n", address[index], address[index + 1]);
+ FPrintF(out, "%6u %9u\n", address[index], address[index + 1]);
}
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
+#ifdef OBJECT_PRINT
+ if (!type_feedback_info()->IsUndefined()) {
+ TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(out);
+ FPrintF(out, "\n");
+ }
+#endif
}
PrintF("RelocInfo (size = %d)\n", relocation_size());
for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
#endif // ENABLE_DISASSEMBLER
@@ -8502,7 +9246,7 @@ void Code::Disassemble(const char* name, FILE* out) {
MaybeObject* JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
- SetFastElementsCapacityMode set_capacity_mode) {
+ SetFastElementsCapacitySmiMode smi_mode) {
Heap* heap = GetHeap();
// We should never end up in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
@@ -8513,34 +9257,40 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
if (!maybe->To(&new_elements)) return maybe;
}
- // Find the new map to use for this object if there is a map change.
- Map* new_map = NULL;
- if (elements()->map() != heap->non_strict_arguments_elements_map()) {
- // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
- // it, or if it's allowed and the old elements array contained only SMIs.
- bool has_fast_smi_only_elements =
- (set_capacity_mode == kForceSmiOnlyElements) ||
- ((set_capacity_mode == kAllowSmiOnlyElements) &&
- (elements()->map()->has_fast_smi_only_elements() ||
- elements() == heap->empty_fixed_array()));
- ElementsKind elements_kind = has_fast_smi_only_elements
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
- if (!maybe->To(&new_map)) return maybe;
+ ElementsKind elements_kind = GetElementsKind();
+ ElementsKind new_elements_kind;
+ // The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
+ // or if it's allowed and the old elements array contained only SMIs.
+ bool has_fast_smi_elements =
+ (smi_mode == kForceSmiElements) ||
+ ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
+ if (has_fast_smi_elements) {
+ if (IsHoleyElementsKind(elements_kind)) {
+ new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
+ } else {
+ new_elements_kind = FAST_SMI_ELEMENTS;
+ }
+ } else {
+ if (IsHoleyElementsKind(elements_kind)) {
+ new_elements_kind = FAST_HOLEY_ELEMENTS;
+ } else {
+ new_elements_kind = FAST_ELEMENTS;
+ }
}
-
FixedArrayBase* old_elements = elements();
- ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
{ MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, to_kind);
+ accessor->CopyElements(this, new_elements, new_elements_kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+ Map* new_map = map();
+ if (new_elements_kind != elements_kind) {
+ MaybeObject* maybe =
+ GetElementsTransitionMap(GetIsolate(), new_elements_kind);
+ if (!maybe->To(&new_map)) return maybe;
+ }
+ ValidateElements();
set_map_and_elements(new_map, new_elements);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@@ -8552,11 +9302,9 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
GetElementsKind(), new_elements);
}
- // Update the length if necessary.
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
-
return new_elements;
}
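
The kind selection in the rewritten function factors into two independent bits: whether the new backing store may stay Smi-only (forced, or allowed while the current elements are Smi), and whether the old kind was holey, which is always carried over. A compact standalone model of that decision:

    enum Kind {
      FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
      FAST_ELEMENTS, FAST_HOLEY_ELEMENTS
    };

    // Smi-ness comes from the capacity mode, holey-ness from the old kind.
    Kind NewKind(bool keep_smi, bool was_holey) {
      if (keep_smi) {
        return was_holey ? FAST_HOLEY_SMI_ELEMENTS : FAST_SMI_ELEMENTS;
      }
      return was_holey ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    }
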
@@ -8574,20 +9322,28 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (!maybe_obj->To(&elems)) return maybe_obj;
}
+ ElementsKind elements_kind = GetElementsKind();
+ ElementsKind new_elements_kind = elements_kind;
+ if (IsHoleyElementsKind(elements_kind)) {
+ new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ new_elements_kind = FAST_DOUBLE_ELEMENTS;
+ }
+
Map* new_map;
{ MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
+ GetElementsTransitionMap(heap->isolate(), new_elements_kind);
if (!maybe_obj->To(&new_map)) return maybe_obj;
}
FixedArrayBase* old_elements = elements();
- ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
{ MaybeObject* maybe_obj =
accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+ ValidateElements();
set_map_and_elements(new_map, elems);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@@ -8596,7 +9352,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, elements_kind, old_elements,
- FAST_DOUBLE_ELEMENTS, elems);
+ GetElementsKind(), elems);
}
if (IsJSArray()) {
@@ -8636,8 +9392,8 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
}
-Object* Map::GetPrototypeTransition(Object* prototype) {
- FixedArray* cache = prototype_transitions();
+Map* Map::GetPrototypeTransition(Object* prototype) {
+ FixedArray* cache = GetPrototypeTransitions();
int number_of_transitions = NumberOfProtoTransitions();
const int proto_offset =
kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
@@ -8646,8 +9402,7 @@ Object* Map::GetPrototypeTransition(Object* prototype) {
for (int i = 0; i < number_of_transitions; i++) {
if (cache->get(proto_offset + i * step) == prototype) {
Object* map = cache->get(map_offset + i * step);
- ASSERT(map->IsMap());
- return map;
+ return Map::cast(map);
}
}
return NULL;
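
GetPrototypeTransition scans a flat cache: a fixed-size header followed by fixed-width entries that hold a (prototype, map) pair at known offsets. A standalone model of that scan; the header size, entry width, and the -1 sentinel are stand-ins for the real constants and the NULL return:

    #include <vector>

    const int kHeader = 1;    // stands in for kProtoTransitionHeaderSize
    const int kStep = 2;      // stands in for kProtoTransitionElementsPerEntry
    const int kProtoOfs = 0;  // prototype slot within an entry
    const int kMapOfs = 1;    // map slot within an entry

    int FindTransition(const std::vector<int>& cache, int entries, int proto) {
      for (int i = 0; i < entries; i++) {
        if (cache[kHeader + i * kStep + kProtoOfs] == proto) {
          return cache[kHeader + i * kStep + kMapOfs];  // the cached map
        }
      }
      return -1;  // not cached
    }
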
@@ -8660,7 +9415,7 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
// Don't cache prototype transition if this map is shared.
if (is_shared() || !FLAG_cache_prototype_transitions) return this;
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
const int step = kProtoTransitionElementsPerEntry;
const int header = kProtoTransitionHeaderSize;
@@ -8683,7 +9438,8 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
new_cache->set(i + header, cache->get(i + header));
}
cache = new_cache;
- set_prototype_transitions(cache);
+ MaybeObject* set_result = SetPrototypeTransitions(cache);
+ if (set_result->IsFailure()) return set_result;
}
int last = transitions - 1;
@@ -8696,6 +9452,22 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
}
+void Map::ZapTransitions() {
+ TransitionArray* transition_array = transitions();
+ MemsetPointer(transition_array->data_start(),
+ GetHeap()->the_hole_value(),
+ transition_array->length());
+}
+
+
+void Map::ZapPrototypeTransitions() {
+ FixedArray* proto_transitions = GetPrototypeTransitions();
+ MemsetPointer(proto_transitions->data_start(),
+ GetHeap()->the_hole_value(),
+ proto_transitions->length());
+}
+
+
MaybeObject* JSReceiver::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
@@ -8755,21 +9527,24 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// Nothing to do if prototype is already set.
if (map->prototype() == value) return value;
- Object* new_map = map->GetPrototypeTransition(value);
+ if (value->IsJSObject()) {
+ MaybeObject* ok = JSObject::cast(value)->OptimizeAsPrototype();
+ if (ok->IsFailure()) return ok;
+ }
+
+ Map* new_map = map->GetPrototypeTransition(value);
if (new_map == NULL) {
- { MaybeObject* maybe_new_map = map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ MaybeObject* maybe_new_map = map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- { MaybeObject* maybe_new_cache =
- map->PutPrototypeTransition(value, Map::cast(new_map));
- if (maybe_new_cache->IsFailure()) return maybe_new_cache;
- }
+ MaybeObject* maybe_new_cache =
+ map->PutPrototypeTransition(value, new_map);
+ if (maybe_new_cache->IsFailure()) return maybe_new_cache;
- Map::cast(new_map)->set_prototype(value);
+ new_map->set_prototype(value);
}
- ASSERT(Map::cast(new_map)->prototype() == value);
- real_receiver->set_map(Map::cast(new_map));
+ ASSERT(new_map->prototype() == value);
+ real_receiver->set_map(new_map);
heap->ClearInstanceofCache();
ASSERT(size == Size());
@@ -8790,64 +9565,7 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(index, info);
- }
- if (!result.IsEmpty()) {
- ASSERT(result->IsInt32());
- return true; // absence of property is signaled by empty handle.
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- if (!result.IsEmpty()) return true;
- }
-
- if (holder_handle->GetElementsAccessor()->HasElement(
- *receiver_handle, *holder_handle, index)) {
- return true;
- }
-
- if (holder_handle->IsStringObjectWithCharacterAt(index)) return true;
- Object* pt = holder_handle->GetPrototype();
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
- }
- if (pt->IsNull()) return false;
- return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index);
-}
-
-
-JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
+JSObject::LocalElementType JSObject::GetLocalElementType(uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
@@ -8861,13 +9579,13 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
Object* proto = GetPrototype();
if (proto->IsNull()) return UNDEFINED_ELEMENT;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->HasLocalElement(index);
+ return JSObject::cast(proto)->GetLocalElementType(index);
}
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
- : UNDEFINED_ELEMENT;
+ return GetElementAttributeWithInterceptor(this, index, false) != ABSENT
+ ? INTERCEPTED_ELEMENT : UNDEFINED_ELEMENT;
}
// Handle [] on String objects.
@@ -8876,8 +9594,10 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -8888,7 +9608,8 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -8953,40 +9674,6 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
-bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
- }
- }
-
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(receiver, index);
- }
-
- ElementsAccessor* accessor = GetElementsAccessor();
- if (accessor->HasElement(receiver, this, index)) {
- return true;
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- Object* pt = GetPrototype();
- if (pt->IsNull()) return false;
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
- }
- return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
-}
-
-
MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
Object* value,
PropertyAttributes attributes,
@@ -9041,6 +9728,7 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
@@ -9057,7 +9745,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return isolate->heap()->undefined_value();
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
// __defineGetter__ callback
@@ -9172,7 +9862,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastTypeElements() ||
+ ASSERT(HasFastSmiOrObjectElements() ||
HasFastArgumentsElements());
FixedArray* backing_store = FixedArray::cast(elements());
@@ -9198,13 +9888,29 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
// Check if the length property of this object needs to be updated.
uint32_t array_length = 0;
bool must_update_array_length = false;
+ bool introduces_holes = true;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ introduces_holes = index > array_length;
if (index >= array_length) {
must_update_array_length = true;
array_length = index + 1;
}
+ } else {
+ introduces_holes = index >= capacity;
+ }
+
+ // If the array is growing, and it's not growing by a single element at the
+ // end, make sure that the ElementsKind is HOLEY.
+ ElementsKind elements_kind = GetElementsKind();
+ if (introduces_holes &&
+ IsFastElementsKind(elements_kind) &&
+ !IsFastHoleyElementsKind(elements_kind)) {
+ ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
+ MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
+ if (maybe->IsFailure()) return maybe;
}
+
// Check if the capacity of the backing store needs to be increased, or if
// a transition to slow elements is necessary.
if (index >= capacity) {
@@ -9224,42 +9930,44 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
}
}
// Convert to fast double elements if appropriate.
- if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
+ if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
if (maybe->IsFailure()) return maybe;
FixedDoubleArray::cast(elements())->set(index, value->Number());
+ ValidateElements();
return value;
}
- // Change elements kind from SMI_ONLY to generic FAST if necessary.
- if (HasFastSmiOnlyElements() && !value->IsSmi()) {
+ // Change elements kind from Smi-only to generic FAST if necessary.
+ if (HasFastSmiElements() && !value->IsSmi()) {
Map* new_map;
- { MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
- FAST_ELEMENTS);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- }
+ ElementsKind kind = HasFastHoleyElements()
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
+ kind);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+
set_map(new_map);
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
- FAST_ELEMENTS, elements());
- }
}
// Increase backing store capacity if that's been decided previously.
if (new_capacity != capacity) {
FixedArray* new_elements;
- SetFastElementsCapacityMode set_capacity_mode =
- value->IsSmi() && HasFastSmiOnlyElements()
- ? kAllowSmiOnlyElements
- : kDontAllowSmiOnlyElements;
+ SetFastElementsCapacitySmiMode smi_mode =
+ value->IsSmi() && HasFastSmiElements()
+ ? kAllowSmiElements
+ : kDontAllowSmiElements;
{ MaybeObject* maybe =
SetFastElementsCapacityAndLength(new_capacity,
array_length,
- set_capacity_mode);
+ smi_mode);
if (!maybe->To(&new_elements)) return maybe;
}
new_elements->set(index, value);
+ ValidateElements();
return value;
}
+
// Finally, set the new element and length.
ASSERT(elements()->IsFixedArray());
backing_store->set(index, value);
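
The hole check added above treats arrays and plain objects differently: storing exactly at a JSArray's length is a packed append (only index > length punches a hole), while for other objects any store at or past the current capacity counts as hole-introducing and forces a transition to the HOLEY kind first. The predicate, isolated as a standalone sketch:

    #include <cstdint>

    // True when a fast-element store must first transition to a HOLEY kind.
    bool IntroducesHoles(bool is_array, uint32_t index,
                         uint32_t array_length, uint32_t capacity) {
      if (is_array) return index > array_length;  // append at length is packed
      return index >= capacity;
    }
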
@@ -9303,7 +10011,8 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(attributes, NORMAL, details.index());
+ details = PropertyDetails(
+ attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
@@ -9383,20 +10092,21 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
} else {
new_length = dictionary->max_number_key() + 1;
}
- SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
- ? kAllowSmiOnlyElements
- : kDontAllowSmiOnlyElements;
+ SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays
+ ? kAllowSmiElements
+ : kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
- set_capacity_mode = kForceSmiOnlyElements;
+ smi_mode = kForceSmiElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
: SetFastElementsCapacityAndLength(new_length,
new_length,
- set_capacity_mode);
+ smi_mode);
+ ValidateElements();
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9435,27 +10145,40 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
// If the value object is not a heap number, switch to fast elements and try
// again.
bool value_is_smi = value->IsSmi();
+ bool introduces_holes = true;
+ uint32_t length = elms_length;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ introduces_holes = index > length;
+ } else {
+ introduces_holes = index >= elms_length;
+ }
+
if (!value->IsNumber()) {
- Object* obj;
- uint32_t length = elms_length;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- }
MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
elms_length,
length,
- kDontAllowSmiOnlyElements);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return SetFastElement(index,
- value,
- strict_mode,
- check_prototype);
+ kDontAllowSmiElements);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ ValidateElements();
+ return maybe_obj;
}
double double_value = value_is_smi
? static_cast<double>(Smi::cast(value)->value())
: HeapNumber::cast(value)->value();
+ // If the array is growing, and it's not growing by a single element at the
+ // end, make sure that the ElementsKind is HOLEY.
+ ElementsKind elements_kind = GetElementsKind();
+ if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
+ ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
+ MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
+ if (maybe->IsFailure()) return maybe;
+ }
+
// Check whether there is extra space in the fixed array.
if (index < elms_length) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
@@ -9477,13 +10200,11 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
int new_capacity = NewElementsCapacity(index+1);
if (!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastDoubleElementsCapacityAndLength(new_capacity,
- index + 1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj =
+ SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
+ if (maybe_obj->IsFailure()) return maybe_obj;
FixedDoubleArray::cast(elements())->set(index, double_value);
+ ValidateElements();
return value;
}
}
@@ -9508,7 +10229,7 @@ MaybeObject* JSReceiver::SetElement(uint32_t index,
bool check_proto) {
if (IsJSProxy()) {
return JSProxy::cast(this)->SetElementWithHandler(
- index, value, strict_mode);
+ this, index, value, strict_mode);
} else {
return JSObject::cast(this)->SetElement(
index, value, attributes, strict_mode, check_proto);
@@ -9550,28 +10271,31 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value,
+ Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw);
+
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- HandleScope scope(heap->isolate());
- Handle<Object> value_handle(value);
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return *value_handle;
+ if (!heap->isolate()->MayIndexedAccess(*self, index, v8::ACCESS_SET)) {
+ heap->isolate()->ReportFailedAccessCheck(*self, v8::ACCESS_SET);
+ return *value;
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value;
+ if (proto->IsNull()) return *value;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
- value,
+ *value,
attributes,
strict_mode,
check_prototype,
@@ -9580,9 +10304,8 @@ MaybeObject* JSObject::SetElement(uint32_t index,
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
- Isolate* isolate = GetHeap()->isolate();
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { Handle<Object>(this), number };
+ Handle<Object> args[] = { self, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -9597,22 +10320,55 @@ MaybeObject* JSObject::SetElement(uint32_t index,
dictionary->set_requires_slow_elements();
}
+ // From here on, everything has to be handlified.
+ Handle<String> name;
+ Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_array_length;
+ PropertyAttributes old_attributes = ABSENT;
+ bool preexists = false;
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ name = isolate->factory()->Uint32ToString(index);
+ preexists = self->HasLocalElement(index);
+ if (preexists) {
+ old_attributes = self->GetLocalPropertyAttribute(*name);
+ // TODO(observe): only read & set old_value if we have a data property
+ old_value = Object::GetElement(self, index);
+ } else if (self->IsJSArray()) {
+ // Store old array length in case adding an element grows the array.
+ old_array_length = handle(Handle<JSArray>::cast(self)->length());
+ }
+ }
+
// Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return SetElementWithInterceptor(index,
- value,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ MaybeObject* result = self->HasIndexedInterceptor()
+ ? self->SetElementWithInterceptor(
+ index, *value, attributes, strict_mode, check_prototype, set_mode)
+ : self->SetElementWithoutInterceptor(
+ index, *value, attributes, strict_mode, check_prototype, set_mode);
+
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult)) return result;
+
+ if (FLAG_harmony_observation && map()->is_observed()) {
+ PropertyAttributes new_attributes = self->GetLocalPropertyAttribute(*name);
+ if (!preexists) {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ if (self->IsJSArray() &&
+ !old_array_length->SameValue(Handle<JSArray>::cast(self)->length())) {
+ EnqueueChangeRecord(self, "updated",
+ isolate->factory()->length_symbol(),
+ old_array_length);
+ }
+ } else if (new_attributes != old_attributes || old_value->IsTheHole()) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else {
+ Handle<Object> new_value = Object::GetElement(self, index);
+ if (!new_value->SameValue(*old_value))
+ EnqueueChangeRecord(self, "updated", name, old_value);
+ }
}
- return SetElementWithoutInterceptor(index,
- value,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ return *hresult;
}
@@ -9627,10 +10383,13 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
return SetFastElement(index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
return SetFastDoubleElement(index, value, strict_mode, check_prototype);
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
@@ -9721,11 +10480,19 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ElementsKind from_kind = map()->elements_kind();
+ if (IsFastHoleyElementsKind(from_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+
Isolate* isolate = GetIsolate();
- if ((from_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements() == isolate->heap()->empty_fixed_array()) &&
- to_kind == FAST_ELEMENTS) {
- ASSERT(from_kind != FAST_ELEMENTS);
+ if (elements() == isolate->heap()->empty_fixed_array() ||
+ (IsFastSmiOrObjectElementsKind(from_kind) &&
+ IsFastSmiOrObjectElementsKind(to_kind)) ||
+ (from_kind == FAST_DOUBLE_ELEMENTS &&
+ to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
+ ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
+ // No change is needed to the elements() buffer; the transition
+ // only requires a map change.
MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
Map* new_map;
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -9752,18 +10519,21 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
}
}
- if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) {
MaybeObject* maybe_result =
SetFastDoubleElementsCapacityAndLength(capacity, length);
if (maybe_result->IsFailure()) return maybe_result;
+ ValidateElements();
return this;
}
- if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ if (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiOnlyElements);
+ capacity, length, kDontAllowSmiElements);
if (maybe_result->IsFailure()) return maybe_result;
+ ValidateElements();
return this;
}
@@ -9777,10 +10547,14 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
// static
bool Map::IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind) {
- return
- (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
- (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
+ // Transitions can't go backwards.
+ if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ return false;
+ }
+
+ // Transitions from HOLEY -> PACKED are not allowed.
+ return !IsFastHoleyElementsKind(from_kind) ||
+ IsFastHoleyElementsKind(to_kind);
}
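
The rewritten predicate reduces to two rules: the target kind must be strictly more general than the source, and a holey source may never become packed. A standalone model under an assumed generality order SMI < DOUBLE < OBJECT (the real IsMoreGeneralElementsKindTransition encodes the full lattice):

    enum Repr { SMI, DOUBLE, OBJECT };  // assumed generality order

    bool MoreGeneral(Repr from, Repr to) { return to > from; }

    bool IsValidTransition(Repr from, bool from_holey,
                           Repr to, bool to_holey) {
      if (!MoreGeneral(from, to)) return false;  // no backwards transitions
      return !from_holey || to_holey;            // no HOLEY -> PACKED
    }
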
@@ -9826,7 +10600,11 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
result = getter(index, info);
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ if (!result.IsEmpty()) {
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
+ }
}
Heap* heap = holder_handle->GetHeap();
@@ -9871,8 +10649,16 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
break;
}
// Fall through.
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
+ if (IsJSArray()) {
+ *capacity = backing_store_base->length();
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+ // Fall through if packing is not guaranteed.
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
*capacity = backing_store->length();
for (int i = 0; i < *capacity; ++i) {
@@ -9886,7 +10672,14 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
*used = dictionary->NumberOfElements();
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
+ if (IsJSArray()) {
+ *capacity = backing_store_base->length();
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+ // Fall through if packing is not guaranteed.
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
*capacity = elms->length();
for (int i = 0; i < *capacity; i++) {
@@ -10003,15 +10796,15 @@ void Dictionary<Shape, Key>::Print(FILE* out) {
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
if (HashTable<Shape, Key>::IsKey(k)) {
- PrintF(out, " ");
+ FPrintF(out, " ");
if (k->IsString()) {
String::cast(k)->StringPrint(out);
} else {
k->ShortPrint(out);
}
- PrintF(out, ": ");
+ FPrintF(out, ": ");
ValueAt(i)->ShortPrint(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
}
}
}
@@ -10055,13 +10848,13 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
MaybeObject* JSObject::GetPropertyPostInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return GetProperty(receiver, &result, name, attributes);
}
// Continue searching via the prototype chain.
@@ -10073,13 +10866,13 @@ MaybeObject* JSObject::GetPropertyPostInterceptor(
MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
LookupResult result(GetIsolate());
LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return GetProperty(receiver, &result, name, attributes);
}
return GetHeap()->undefined_value();
@@ -10087,13 +10880,13 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
MaybeObject* JSObject::GetPropertyWithInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes) {
Isolate* isolate = GetIsolate();
InterceptorInfo* interceptor = GetNamedInterceptor();
HandleScope scope(isolate);
- Handle<JSReceiver> receiver_handle(receiver);
+ Handle<Object> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
@@ -10113,7 +10906,9 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!result.IsEmpty()) {
*attributes = NONE;
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
}
@@ -10138,7 +10933,7 @@ bool JSObject::HasRealNamedProperty(String* key) {
LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
- return result.IsProperty() && (result.type() != INTERCEPTOR);
+ return result.IsFound() && !result.IsInterceptor();
}
@@ -10156,16 +10951,19 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
- uint32_t length = IsJSArray() ?
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
- case FAST_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10215,14 +11013,21 @@ bool JSObject::HasRealNamedCallbackProperty(String* key) {
LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
- return result.IsFound() && (result.type() == CALLBACKS);
+ return result.IsPropertyCallbacks();
}
int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
- return HasFastProperties() ?
- map()->NumberOfDescribedProperties(filter) :
- property_dictionary()->NumberOfElementsFilterAttributes(filter);
+ if (HasFastProperties()) {
+ Map* map = this->map();
+ if (filter == NONE) return map->NumberOfOwnDescriptors();
+ if (filter == DONT_ENUM) {
+ int result = map->EnumLength();
+ if (result != Map::kInvalidEnumCache) return result;
+ }
+ return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
+ }
+ return property_dictionary()->NumberOfElementsFilterAttributes(filter);
}
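
For fast-properties objects the count can now be answered from the map alone in the two common cases: a NONE filter is just the number of own descriptors, and DONT_ENUM can reuse the cached enum length when it is valid. A sketch of that dispatch; MapInfo and the ScanDescriptors placeholder are hypothetical stand-ins for the map accessors and the full descriptor walk:

    enum Filter { NONE, DONT_ENUM };
    const int kInvalidEnumCache = -1;

    struct MapInfo { int own_descriptors; int enum_length; };

    // Placeholder for NumberOfDescribedProperties(), the slow filtered walk.
    int ScanDescriptors(const MapInfo& m, Filter) { return m.own_descriptors; }

    int CountLocalProperties(const MapInfo& m, Filter f) {
      if (f == NONE) return m.own_descriptors;  // every own property counts
      if (f == DONT_ENUM && m.enum_length != kInvalidEnumCache) {
        return m.enum_length;                   // cached enumerable count
      }
      return ScanDescriptors(m, f);             // fallback for other filters
    }
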
@@ -10345,11 +11150,12 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
if (HasFastProperties()) {
+ int real_size = map()->NumberOfOwnDescriptors();
DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i));
+ ASSERT(storage->length() >= index + real_size);
+ for (int i = 0; i < real_size; i++) {
+ storage->set(index + i, descs->GetKey(i));
}
- ASSERT(storage->length() >= index);
} else {
property_dictionary()->CopyKeysTo(storage,
index,
@@ -10365,7 +11171,7 @@ int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
int JSObject::NumberOfEnumElements() {
// Fast case for objects with no elements.
- if (!IsJSValue() && HasFastElements()) {
+ if (!IsJSValue() && HasFastObjectElements()) {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10381,8 +11187,10 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedArray::cast(elements())->length();
@@ -10397,7 +11205,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedDoubleArray::cast(elements())->length();
@@ -10934,8 +11743,12 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
+ MinimumCapacity capacity_option,
PretenureFlag pretenure) {
- int capacity = ComputeCapacity(at_least_space_for);
+ ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for));
+ int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
+ ? at_least_space_for
+ : ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
return Failure::OutOfMemoryException();
}
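
Allocate now accepts a caller-supplied minimum capacity, but only a power of two is legal (hence the new ASSERT), since these tables index with a mask rather than a modulo. A standalone model of the two capacity modes; ComputeCapacity is approximated here as the next power of two holding twice the requested element count:

    #include <cassert>

    bool IsPowerOfTwo(int x) { return x > 0 && (x & (x - 1)) == 0; }

    int NextPowerOfTwo(int n) {
      int p = 1;
      while (p < n) p <<= 1;
      return p;
    }

    int ChooseCapacity(int at_least_space_for, bool use_custom_minimum) {
      if (use_custom_minimum) {
        assert(IsPowerOfTwo(at_least_space_for));  // mirrors the new ASSERT
        return at_least_space_for;
      }
      // Assumed shape of ComputeCapacity(): room for 2x the elements,
      // rounded up to a power of two.
      return NextPowerOfTwo(at_least_space_for * 2);
    }
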
@@ -10992,31 +11805,6 @@ int StringDictionary::FindEntry(String* key) {
}
-bool StringDictionary::ContainsTransition(int entry) {
- switch (DetailsAt(entry).type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
- return true;
- case CALLBACKS: {
- Object* value = ValueAt(entry);
- if (!value->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value);
- return accessors->getter()->IsMap() || accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
@@ -11069,7 +11857,9 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
(capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
+ Allocate(nof * 2,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -11098,7 +11888,9 @@ MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
!GetHeap()->InNewSpace(this);
Object* obj;
{ MaybeObject* maybe_obj =
- Allocate(at_least_room_for, pretenure ? TENURED : NOT_TENURED);
+ Allocate(at_least_room_for,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -11349,10 +12141,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Convert to fast elements.
Object* obj;
- { MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
+ FAST_HOLEY_ELEMENTS);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
Map* new_map = Map::cast(obj);
PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
@@ -11363,9 +12154,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
FixedArray* fast_elements = FixedArray::cast(new_array);
dict->CopyValuesTo(fast_elements);
+ ValidateElements();
- set_map(new_map);
- set_elements(fast_elements);
+ set_map_and_elements(new_map, fast_elements);
} else if (HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
return Smi::FromInt(ExternalArray::cast(elements())->length());
@@ -11375,7 +12166,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
- ASSERT(HasFastTypeElements() || HasFastDoubleElements());
+ ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
@@ -11505,7 +12296,7 @@ Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
clamped_value = 255;
} else {
// Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(double_value + 0.5);
+ clamped_value = static_cast<uint8_t>(lrint(double_value));
}
} else {
// Clamp undefined to zero (default). All other types have been
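
The lrint() change affects only how in-range doubles are rounded: the old double_value + 0.5 truncation always rounded halves up, while lrint() follows the current FP rounding mode (ties-to-even by default). The full clamp path for a uint8 pixel store, as a standalone sketch:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampPixel(double v) {
      if (!(v > 0)) return 0;   // NaN and non-positive values clamp to 0
      if (v > 255) return 255;  // large values clamp to 255
      return static_cast<uint8_t>(std::lrint(v));  // round to nearest
    }
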
@@ -11709,7 +12500,7 @@ class TwoCharHashTableKey : public HashTableKey {
hash += hash << 3;
hash ^= hash >> 11;
hash += hash << 15;
- if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
+ if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
#ifdef DEBUG
StringHasher hasher(2, seed);
hasher.AddCharacter(c1);
@@ -11845,8 +12636,23 @@ MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
}
-Object* CompilationCacheTable::Lookup(String* src) {
- StringKey key(src);
+// The key for the script compilation cache is dependent on the mode flags,
+// because they change the global language mode and thus binding behaviour.
+// If flags change at some point, we must ensure that we do not hit the cache
+// for code compiled with different settings.
+static LanguageMode CurrentGlobalLanguageMode() {
+ return FLAG_use_strict
+ ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
+ : CLASSIC_MODE;
+}
+
+
+Object* CompilationCacheTable::Lookup(String* src, Context* context) {
+ SharedFunctionInfo* shared = context->closure()->shared();
+ StringSharedKey key(src,
+ shared,
+ CurrentGlobalLanguageMode(),
+ RelocInfo::kNoPosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
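
The script cache key now folds in the sharing function and the global language mode, so a run with --use_strict or --harmony_scoping can never hit code compiled under different binding rules. The mode itself is a two-flag decision table; a standalone rendering of CurrentGlobalLanguageMode():

    enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

    // Strictness decides first; harmony scoping upgrades strict to extended.
    LanguageMode GlobalMode(bool use_strict, bool harmony_scoping) {
      if (!use_strict) return CLASSIC_MODE;
      return harmony_scoping ? EXTENDED_MODE : STRICT_MODE;
    }
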
@@ -11876,17 +12682,24 @@ Object* CompilationCacheTable::LookupRegExp(String* src,
}
-MaybeObject* CompilationCacheTable::Put(String* src, Object* value) {
- StringKey key(src);
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+MaybeObject* CompilationCacheTable::Put(String* src,
+ Context* context,
+ Object* value) {
+ SharedFunctionInfo* shared = context->closure()->shared();
+ StringSharedKey key(src,
+ shared,
+ CurrentGlobalLanguageMode(),
+ RelocInfo::kNoPosition);
+ CompilationCacheTable* cache;
+ MaybeObject* maybe_cache = EnsureCapacity(1, &key);
+ if (!maybe_cache->To(&cache)) return maybe_cache;
+
+ Object* k;
+ MaybeObject* maybe_k = key.AsObject();
+ if (!maybe_k->To(&k)) return maybe_k;
- CompilationCacheTable* cache =
- reinterpret_cast<CompilationCacheTable*>(obj);
int entry = cache->FindInsertionEntry(key.Hash());
- cache->set(EntryToIndex(entry), src);
+ cache->set(EntryToIndex(entry), k);
cache->set(EntryToIndex(entry) + 1, value);
cache->ElementAdded();
return cache;
@@ -12030,6 +12843,12 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) {
}
+void StringDictionary::DoGenerateNewEnumerationIndices(
+ Handle<StringDictionary> dictionary) {
+ CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
+ dictionary->GenerateNewEnumerationIndices());
+}
+
template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
Heap* heap = Dictionary<Shape, Key>::GetHeap();
@@ -12056,7 +12875,8 @@ MaybeObject* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() {
int pos = 0;
for (int i = 0; i < capacity; i++) {
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
- enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()));
+ int index = DetailsAt(i).dictionary_index();
+ enumeration_order->set(pos++, Smi::FromInt(index));
}
}
@@ -12155,6 +12975,8 @@ template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
+ ASSERT(details.dictionary_index() == details.descriptor_index());
+
// Validate key is absent.
SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
@@ -12182,7 +13004,9 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key,
uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
// Insert element at empty or deleted entry
- if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) {
+ if (!details.IsDeleted() &&
+ details.dictionary_index() == 0 &&
+ Shape::kIsEnumerable) {
// Assign an enumeration index to the property and update the next
// enumeration index via SetNextEnumerationIndex.
int index = NextEnumerationIndex();
@@ -12273,7 +13097,7 @@ MaybeObject* SeededNumberDictionary::Set(uint32_t key,
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
- DetailsAt(entry).index());
+ DetailsAt(entry).dictionary_index());
MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
Object* object_key;
if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
@@ -12344,23 +13168,45 @@ void Dictionary<Shape, Key>::CopyKeysTo(
}
-void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
- FixedArray* sort_array) {
- ASSERT(storage->length() >= NumberOfEnumElements());
+FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) {
+ int length = storage->length();
+ ASSERT(length >= NumberOfEnumElements());
+ Heap* heap = GetHeap();
+ Object* undefined_value = heap->undefined_value();
int capacity = Capacity();
- int index = 0;
+ int properties = 0;
+
+ // Fill in the enumeration array by assigning enumerable keys at their
+ // enumeration index. This will leave holes in the array if there are keys
+ // that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
- storage->set(index, k);
- sort_array->set(index, Smi::FromInt(details.index()));
- index++;
+ properties++;
+ storage->set(details.dictionary_index() - 1, k);
+ if (properties == length) break;
}
}
- storage->SortPairs(sort_array, sort_array->length());
- ASSERT(storage->length() >= index);
+
+ // There are holes in the enumeration array if fewer properties were
+ // assigned than the length of the array. If so, crunch all the existing
+ // properties together by shifting them to the left (maintaining the
+ // enumeration order) and trimming off the right side of the array.
+ if (properties < length) {
+ if (properties == 0) return heap->empty_fixed_array();
+ properties = 0;
+ for (int i = 0; i < length; ++i) {
+ Object* value = storage->get(i);
+ if (value != undefined_value) {
+ storage->set(properties, value);
+ ++properties;
+ }
+ }
+ RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
+ }
+ return storage;
}
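
The two-phase shape of CopyEnumKeysTo, placing keys at dictionary_index - 1 and then closing any gaps, comes down to the following compaction step, sketched here over a plain vector (an illustrative analogue: an empty string stands in for the undefined hole):

#include <cstddef>
#include <string>
#include <vector>

// Shift the surviving entries left, preserving their order, and trim the
// tail; mirrors the "crunch together and right-trim" step above.
void CompactLeftAndTrim(std::vector<std::string>* storage) {
  std::size_t kept = 0;
  for (std::size_t i = 0; i < storage->size(); ++i) {
    if (!(*storage)[i].empty()) {           // skip the holes
      (*storage)[kept++] = (*storage)[i];   // stable left shift
    }
  }
  storage->resize(kept);                    // analogous to RightTrimFixedArray
}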
@@ -12410,18 +13256,12 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
JSObject* obj, int unused_property_fields) {
// Make sure we preserve dictionary representation if there are too many
// descriptors.
- if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
-
- // Figure out if it is necessary to generate new enumeration indices.
- int max_enumeration_index =
- NextEnumerationIndex() +
- (DescriptorArray::kMaxNumberOfDescriptors -
- NumberOfElements());
- if (!PropertyDetails::IsValidIndex(max_enumeration_index)) {
- Object* result;
- { MaybeObject* maybe_result = GenerateNewEnumerationIndices();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ int number_of_elements = NumberOfElements();
+ if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+
+ if (number_of_elements != NextEnumerationIndex()) {
+ MaybeObject* maybe_result = GenerateNewEnumerationIndices();
+ if (maybe_result->IsFailure()) return maybe_result;
}
int instance_descriptor_length = 0;
@@ -12445,18 +13285,35 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
}
+ int inobject_props = obj->map()->inobject_properties();
+
+ // Allocate new map.
+ Map* new_map;
+ MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ new_map->set_dictionary_map(false);
+
+ if (instance_descriptor_length == 0) {
+ ASSERT_LE(unused_property_fields, inobject_props);
+ // Transform the object.
+ new_map->set_unused_property_fields(inobject_props);
+ obj->set_map(new_map);
+ obj->set_properties(heap->empty_fixed_array());
+ // Check that it really works.
+ ASSERT(obj->HasFastProperties());
+ return obj;
+ }
+
// Allocate the instance descriptor.
DescriptorArray* descriptors;
- { MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
- if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
- return maybe_descriptors;
- }
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(instance_descriptor_length);
+ if (!maybe_descriptors->To(&descriptors)) {
+ return maybe_descriptors;
}
DescriptorArray::WhitenessWitness witness(descriptors);
- int inobject_props = obj->map()->inobject_properties();
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
if (number_of_allocated_fields < 0) {
@@ -12466,33 +13323,33 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
// Allocate the fixed array for the fields.
- Object* fields;
- { MaybeObject* maybe_fields =
- heap->AllocateFixedArray(number_of_allocated_fields);
- if (!maybe_fields->ToObject(&fields)) return maybe_fields;
- }
+ FixedArray* fields;
+ MaybeObject* maybe_fields =
+ heap->AllocateFixedArray(number_of_allocated_fields);
+ if (!maybe_fields->To(&fields)) return maybe_fields;
// Fill in the instance descriptor and the fields.
- int next_descriptor = 0;
int current_offset = 0;
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k)) {
Object* value = ValueAt(i);
// Ensure the key is a symbol before writing into the instance descriptor.
- Object* key;
- { MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
- if (!maybe_key->ToObject(&key)) return maybe_key;
- }
+ String* key;
+ MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
+ if (!maybe_key->To(&key)) return maybe_key;
+
PropertyDetails details = DetailsAt(i);
+ ASSERT(details.descriptor_index() == details.dictionary_index());
+ int enumeration_index = details.descriptor_index();
PropertyType type = details.type();
if (value->IsJSFunction() && !heap->InNewSpace(value)) {
- ConstantFunctionDescriptor d(String::cast(key),
+ ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
obj->InObjectPropertyAtPut(current_offset,
@@ -12500,24 +13357,19 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
UPDATE_WRITE_BARRIER);
} else {
int offset = current_offset - inobject_props;
- FixedArray::cast(fields)->set(offset, value);
+ fields->set(offset, value);
}
- FieldDescriptor d(String::cast(key),
+ FieldDescriptor d(key,
current_offset++,
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
- if (value->IsAccessorPair()) {
- MaybeObject* maybe_copy =
- AccessorPair::cast(value)->CopyWithoutTransitions();
- if (!maybe_copy->To(&value)) return maybe_copy;
- }
- CallbacksDescriptor d(String::cast(key),
+ CallbacksDescriptor d(key,
value,
details.attributes(),
- details.index());
- descriptors->Set(next_descriptor++, &d, witness);
+ enumeration_index);
+ descriptors->Set(enumeration_index - 1, &d, witness);
} else {
UNREACHABLE();
}
@@ -12525,22 +13377,17 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
ASSERT(current_offset == number_of_fields);
- descriptors->Sort(witness);
- // Allocate new map.
- Object* new_map;
- { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ descriptors->Sort();
+
+ new_map->InitializeDescriptors(descriptors);
+ new_map->set_unused_property_fields(unused_property_fields);
// Transform the object.
- obj->set_map(Map::cast(new_map));
- obj->map()->set_instance_descriptors(descriptors);
- obj->map()->set_unused_property_fields(unused_property_fields);
+ obj->set_map(new_map);
- obj->set_properties(FixedArray::cast(fields));
+ obj->set_properties(fields);
ASSERT(obj->IsJSObject());
- descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
// Check that it really works.
ASSERT(obj->HasFastProperties());
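
Note that the loop above no longer appends with next_descriptor++: each descriptor is written directly at enumeration_index - 1, so array position now encodes enumeration order, and the parameterless Sort() establishes hash order for lookup. A rough sketch of how a hash-sorted side order can coexist with enumeration-ordered storage (simplified types; the real mechanism is the GetSortedKey/SetSortedKey machinery declared in objects.h below):

#include <algorithm>
#include <cstddef>
#include <stdint.h>
#include <string>
#include <vector>

struct Entry { std::string key; uint32_t hash; };

struct HashLess {
  const std::vector<Entry>* d;
  bool operator()(int a, int b) const { return (*d)[a].hash < (*d)[b].hash; }
};

// Entries stay at their enumeration slots; the returned permutation lists
// them in hash order so a lookup can binary-search without moving them.
std::vector<int> HashSortedOrder(const std::vector<Entry>& descriptors) {
  std::vector<int> order(descriptors.size());
  for (std::size_t i = 0; i < order.size(); ++i) order[i] = static_cast<int>(i);
  HashLess less = { &descriptors };
  std::sort(order.begin(), order.end(), less);
  return order;
}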
@@ -12611,11 +13458,11 @@ Object* ObjectHashTable::Lookup(Object* key) {
// If the object does not have an identity hash, it was never used as a key.
{ MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->undefined_value();
+ return GetHeap()->the_hole_value();
}
}
int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
return get(EntryToIndex(entry) + 1);
}
@@ -12632,7 +13479,7 @@ MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
int entry = FindEntry(key);
// Check whether to perform removal operation.
- if (value->IsUndefined()) {
+ if (value->IsTheHole()) {
if (entry == kNotFound) return this;
RemoveEntry(entry);
return Shrink(key);
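
Switching the absent/removal sentinel from undefined to the hole frees undefined to be stored as an ordinary value; only the hole, which script code can never produce, stays reserved. A minimal analogue of the sentinel discipline (not V8 code):

#include <unordered_map>

struct IntTable {
  static const int kUndefined = 0;  // a perfectly legal stored value
  static const int kHole = -1;      // private sentinel, never user-visible
  std::unordered_map<int, int> map;

  int Lookup(int key) const {
    std::unordered_map<int, int>::const_iterator it = map.find(key);
    return it == map.end() ? kHole : it->second;  // hole, not undefined
  }
  void Put(int key, int value) {
    if (value == kHole) { map.erase(key); return; }  // hole means "remove"
    map[key] = value;  // storing kUndefined is now unambiguous
  }
};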
diff --git a/src/3rdparty/v8/src/objects.h b/src/3rdparty/v8/src/objects.h
index 59458da..fe9655a 100644
--- a/src/3rdparty/v8/src/objects.h
+++ b/src/3rdparty/v8/src/objects.h
@@ -30,9 +30,10 @@
#include "allocation.h"
#include "builtins.h"
+#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
@@ -40,6 +41,7 @@
#include "mips/constants-mips.h"
#endif
#include "v8checks.h"
+#include "zone.h"
//
@@ -82,6 +84,7 @@
// - Context
// - JSFunctionResultCache
// - ScopeInfo
+// - TransitionArray
// - FixedDoubleArray
// - ExternalArray
// - ExternalPixelArray
@@ -131,40 +134,6 @@
namespace v8 {
namespace internal {
-enum ElementsKind {
- // The "fast" kind for elements that only contain SMI values. Must be first
- // to make it possible to efficiently check maps for this kind.
- FAST_SMI_ONLY_ELEMENTS,
-
- // The "fast" kind for tagged values. Must be second to make it possible to
- // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
- // together at once.
- FAST_ELEMENTS,
-
- // The "fast" kind for unwrapped, non-tagged double values.
- FAST_DOUBLE_ELEMENTS,
-
- // The "slow" kind.
- DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
- // The "fast" kind for external arrays
- EXTERNAL_BYTE_ELEMENTS,
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
- EXTERNAL_SHORT_ELEMENTS,
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
- EXTERNAL_INT_ELEMENTS,
- EXTERNAL_UNSIGNED_INT_ELEMENTS,
- EXTERNAL_FLOAT_ELEMENTS,
- EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS,
-
- // Derived constants from ElementsKind
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
- LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
-};
-
enum CompareMapMode {
REQUIRE_EXACT_MAP,
ALLOW_ELEMENT_TRANSITION_MAPS
@@ -175,13 +144,6 @@ enum KeyedAccessGrowMode {
ALLOW_JSARRAY_GROWTH
};
-const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-
-void PrintElementsKind(FILE* out, ElementsKind kind);
-
-inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
@@ -209,9 +171,47 @@ enum CreationFlag {
};
+// Indicates whether transitions can be added to a source map or not.
+enum TransitionFlag {
+ INSERT_TRANSITION,
+ OMIT_TRANSITION
+};
+
+
+// Indicates whether the transition is simple: the target map of the transition
+// either extends the current map with a new property, or it modifies the
+// property that was added last to the current map.
+enum SimpleTransitionFlag {
+ SIMPLE_TRANSITION,
+ FULL_TRANSITION
+};
+
+
+// Indicates whether we are only interested in the descriptors of a particular
+// map, or in all descriptors in the descriptor array.
+enum DescriptorFlag {
+ ALL_DESCRIPTORS,
+ OWN_DESCRIPTORS
+};
+
+// The GC maintains a bit of information, the MarkingParity, which toggles
+// from odd to even and back every time marking is completed. Incremental
+// marking can visit an object twice during a marking phase, so algorithms
+// that piggy-back on marking can use the parity to ensure that they only
+// perform an operation on an object once per marking phase: they record the
+// MarkingParity when they visit an object, and only re-visit the object when it
+// is marked again and the MarkingParity changes.
+enum MarkingParity {
+ NO_MARKING_PARITY,
+ ODD_MARKING_PARITY,
+ EVEN_MARKING_PARITY
+};
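
A sketch of the once-per-phase pattern the comment describes (illustrative only; no V8 API is implied):

enum MarkingParityExample { NO_PARITY, ODD_PARITY, EVEN_PARITY };

// Record the parity seen at the first visit; a second visit in the same
// marking phase presents the same parity and is ignored.
struct OncePerMarkingPhase {
  MarkingParityExample last_seen;
  OncePerMarkingPhase() : last_seen(NO_PARITY) {}

  bool ShouldActOn(MarkingParityExample current) {
    if (current == last_seen) return false;  // already handled this phase
    last_seen = current;
    return true;
  }
};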
+
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
+const int kStubMajorKeyBits = 6;
+const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
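
Worked example: with a 32-bit int and a one-bit smi tag (a typical 32-bit build), kStubMinorKeyBits is 32 - 1 - 6 = 25, so the major and minor keys together fill the 31-bit smi payload exactly. A hypothetical packing along those lines (the actual field order inside code stubs is not shown in this hunk):

// 6 low bits for the major key, the remaining 25 for the minor key;
// 6 + 25 == 31 bits, exactly a smi payload on a 32-bit build.
int PackStubKey(int major, int minor) {
  return (minor << 6 /* kStubMajorKeyBits */) | major;
}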
// All Maps have a field instance_type containing a InstanceType.
// It describes the type of the instances.
@@ -479,7 +479,7 @@ const uint32_t kSymbolTag = 0x40;
// two-byte characters or one-byte characters.
const uint32_t kStringEncodingMask = 0x4;
const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kAsciiStringTag = 0x4;
+const uint32_t kOneByteStringTag = 0x4;
// If bit 7 is clear, the low-order 2 bits indicate the representation
// of the string.
@@ -530,39 +530,39 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+ CONS_ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kConsStringTag,
SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
+ SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kOneByteStringTag | kExternalStringTag |
kSymbolTag | kShortExternalStringTag,
EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_SYMBOL_TYPE =
- kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ kOneByteStringTag | kSymbolTag | kExternalStringTag,
STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
+ ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag,
CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
+ CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag,
SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
+ SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag,
SHORT_EXTERNAL_STRING_TYPE =
kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
+ kOneByteStringTag | kExternalStringTag | kShortExternalStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
// LAST_STRING_TYPE
- EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
+ EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
// Objects allocated in their own spaces (never in new space).
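
The renamed kOneByteStringTag keeps the value 0x4, so every string instance type remains a bitwise OR of orthogonal tag groups and a single mask test isolates the encoding. A small self-contained illustration using the constants from this header (kSeqStringTag is 0x0 here; it is defined outside this hunk):

#include <stdint.h>

const uint32_t kStringEncodingMask = 0x4;  // from this header
const uint32_t kOneByteStringTag = 0x4;
const uint32_t kSeqStringTag = 0x0;        // defined elsewhere in objects.h

// ASCII_STRING_TYPE == kOneByteStringTag | kSeqStringTag; masking with
// kStringEncodingMask recovers the encoding bit regardless of the
// representation tags that were OR'ed in.
bool IsOneByteEncoded(uint32_t instance_type) {
  return (instance_type & kStringEncodingMask) == kOneByteStringTag;
}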
@@ -684,6 +684,25 @@ STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType);
STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(FAST_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(MAP_CODE_CACHE_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SYMBOL_TABLE_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(TRANSITION_ARRAY_SUB_TYPE)
+
+enum FixedArraySubInstanceType {
+#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
+#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
+};
+
+
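
The list/define/undef trio above is the classic X-macro pattern; a miniature, self-contained expansion with hypothetical names:

#define COLOR_SUB_TYPE_LIST(V) \
  V(RED_SUB_TYPE)              \
  V(GREEN_SUB_TYPE)

enum ColorSubType {
#define DEFINE_COLOR_SUB_TYPE(name) name,
  COLOR_SUB_TYPE_LIST(DEFINE_COLOR_SUB_TYPE)
#undef DEFINE_COLOR_SUB_TYPE
  LAST_COLOR_SUB_TYPE = GREEN_SUB_TYPE
};
// Preprocesses to: enum ColorSubType { RED_SUB_TYPE, GREEN_SUB_TYPE,
//                                      LAST_COLOR_SUB_TYPE = GREEN_SUB_TYPE };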
enum CompareResult {
LESS = -1,
EQUAL = 0,
@@ -704,12 +723,13 @@ enum CompareResult {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
+class AccessorPair;
class DictionaryElementsAccessor;
class ElementsAccessor;
+class Failure;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
-class Failure;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -723,6 +743,11 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
template <class C> static inline bool Is(Object* obj);
+#ifdef VERIFY_HEAP
+#define DECLARE_VERIFIER(Name) void Name##Verify();
+#else
+#define DECLARE_VERIFIER(Name)
+#endif
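
Expansion example for the macro just defined: under VERIFY_HEAP, DECLARE_VERIFIER(Smi) produces the declaration void SmiVerify(); and otherwise nothing, which is how the per-class verifier declarations later in this file disappear from non-verifying builds:

#ifdef VERIFY_HEAP
#define DECLARE_VERIFIER(Name) void Name##Verify();
#else
#define DECLARE_VERIFIER(Name)
#endif

class Example {
 public:
  DECLARE_VERIFIER(Example)  // "void ExampleVerify();" under VERIFY_HEAP,
                             // nothing at all otherwise
};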
class MaybeObject BASE_EMBEDDED {
public:
@@ -756,6 +781,13 @@ class MaybeObject BASE_EMBEDDED {
return true;
}
+ template<typename T>
+ inline bool ToHandle(Handle<T>* obj) {
+ if (IsFailure()) return false;
+ *obj = handle(T::cast(reinterpret_cast<Object*>(this)));
+ return true;
+ }
+
#ifdef OBJECT_PRINT
// Prints this object with details.
inline void Print() {
@@ -767,7 +799,7 @@ class MaybeObject BASE_EMBEDDED {
void Print(FILE* out);
void PrintLn(FILE* out);
#endif
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verifies the object.
void Verify();
#endif
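
ToHandle joins the To/ToObject family used throughout this patch to propagate allocation failures. The recurring idiom, sketched with this codebase's own types (AllocateFixedArray is the heap call seen elsewhere in this diff; the surrounding function is illustrative):

MaybeObject* AllocateAndUse(Heap* heap) {
  FixedArray* fields;
  MaybeObject* maybe_fields = heap->AllocateFixedArray(16);
  // To<T>() is false exactly when the result is a Failure, so the failure
  // value itself is returned and propagates to the caller.
  if (!maybe_fields->To(&fields)) return maybe_fields;
  // ... use fields ...
  return fields;  // any Object* is also a successful MaybeObject*
}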
@@ -810,14 +842,14 @@ class MaybeObject BASE_EMBEDDED {
V(JSModule) \
V(Map) \
V(DescriptorArray) \
+ V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(Context) \
- V(GlobalContext) \
- V(ModuleContext) \
+ V(NativeContext) \
V(ScopeInfo) \
V(JSFunction) \
V(Code) \
@@ -853,6 +885,7 @@ class MaybeObject BASE_EMBEDDED {
V(UndetectableObject) \
V(AccessCheckNeeded) \
V(JSGlobalPropertyCell) \
+ V(ObjectHashTable) \
class JSReceiver;
@@ -913,8 +946,8 @@ class Object : public MaybeObject {
Object* ToBoolean(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
- // global_context is used when creating wrapper object.
- MUST_USE_RESULT MaybeObject* ToObject(Context* global_context);
+ // native_context is used when creating wrapper object.
+ MUST_USE_RESULT MaybeObject* ToObject(Context* native_context);
// Converts this to a Smi if possible.
// Failure is returned otherwise.
@@ -974,11 +1007,13 @@ class Object : public MaybeObject {
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
static void VerifyPointer(Object* p);
#endif
+ inline void VerifyApiCallResultType();
+
// Prints this object without details.
inline void ShortPrint() {
ShortPrint(stdout);
@@ -1027,9 +1062,8 @@ class Smi: public Object {
}
void SmiPrint(FILE* out);
void SmiPrint(StringStream* accumulator);
-#ifdef DEBUG
- void SmiVerify();
-#endif
+
+ DECLARE_VERIFIER(Smi)
static const int kMinValue =
(static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
@@ -1100,9 +1134,8 @@ class Failure: public MaybeObject {
}
void FailurePrint(FILE* out);
void FailurePrint(StringStream* accumulator);
-#ifdef DEBUG
- void FailureVerify();
-#endif
+
+ DECLARE_VERIFIER(Failure)
private:
inline intptr_t value() const;
@@ -1233,9 +1266,8 @@ class HeapObject: public Object {
void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
-
-#ifdef DEBUG
- void HeapObjectVerify();
+ DECLARE_VERIFIER(HeapObject)
+#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
@@ -1263,9 +1295,6 @@ class HeapObject: public Object {
};
-#define SLOT_ADDR(obj, offset) \
- reinterpret_cast<Object**>((obj)->address() + offset)
-
// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
@@ -1280,8 +1309,8 @@ class FixedBodyDescriptor {
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, end_offset));
+ StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
}
};
@@ -1300,13 +1329,11 @@ class FlexibleBodyDescriptor {
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
- SLOT_ADDR(obj, object_size));
+ StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, object_size));
}
};
-#undef SLOT_ADDR
-
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer)
@@ -1326,9 +1353,7 @@ class HeapNumber: public HeapObject {
}
void HeapNumberPrint(FILE* out);
void HeapNumberPrint(StringStream* accumulator);
-#ifdef DEBUG
- void HeapNumberVerify();
-#endif
+ DECLARE_VERIFIER(HeapNumber)
inline int get_exponent();
inline int get_sign();
@@ -1392,6 +1417,20 @@ class JSReceiver: public HeapObject {
FORCE_DELETION
};
+ // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
+ // a keyed store is of the form a[expression] = foo.
+ enum StoreFromKeyed {
+ MAY_BE_STORE_FROM_KEYED,
+ CERTAINLY_NOT_STORE_FROM_KEYED
+ };
+
+ // Internal properties (e.g. the hidden properties dictionary) might
+ // be added even though the receiver is non-extensible.
+ enum ExtensibilityCheck {
+ PERFORM_EXTENSIBILITY_CHECK,
+ OMIT_EXTENSIBILITY_CHECK
+ };
+
// Casting.
static inline JSReceiver* cast(Object* obj);
@@ -1402,16 +1441,20 @@ class JSReceiver: public HeapObject {
StrictModeFlag strict_mode,
bool skip_fallback_interceptor = false);
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool skip_fallback_interceptor = false);
- MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
- String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetProperty(
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED,
+ bool skip_fallback_interceptor = false);
+ MUST_USE_RESULT MaybeObject* SetProperty(
+ LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
Object* value);
@@ -1441,14 +1484,21 @@ class JSReceiver: public HeapObject {
String* name);
PropertyAttributes GetLocalPropertyAttribute(String* name);
+ inline PropertyAttributes GetElementAttribute(uint32_t index);
+ inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
+
// Can cause a GC.
inline bool HasProperty(String* name);
inline bool HasLocalProperty(String* name);
inline bool HasElement(uint32_t index);
+ inline bool HasLocalElement(uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
+ // Return the constructor function (may be Heap::null_value()).
+ inline Object* GetConstructor();
+
// Set the object's prototype (only JSReceiver and null are allowed).
MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
bool skip_hidden_prototypes);
@@ -1470,10 +1520,10 @@ class JSReceiver: public HeapObject {
Smi* GenerateIdentityHash();
private:
- PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
+ PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -1515,13 +1565,19 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
- inline bool HasFastSmiOnlyElements();
- inline bool HasFastElements();
- // Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT
- // elements. TODO(danno): Rename HasFastTypeElements to HasFastElements() and
- // HasFastElements to HasFastObjectElements.
- inline bool HasFastTypeElements();
+ // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
+ inline bool HasFastSmiElements();
+ // Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
+ inline bool HasFastObjectElements();
+ // Returns true if an object has elements of FAST_ELEMENTS or
+ // FAST_SMI_ONLY_ELEMENTS.
+ inline bool HasFastSmiOrObjectElements();
+ // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
+ // ElementsKind.
inline bool HasFastDoubleElements();
+ // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
+ // ElementsKind.
+ inline bool HasFastHoleyElements();
inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
@@ -1563,7 +1619,8 @@ class JSObject: public JSReceiver {
String* key,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode);
MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
LookupResult* result,
String* name,
@@ -1585,7 +1642,8 @@ class JSObject: public JSReceiver {
String* name,
Object* value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ ExtensibilityCheck extensibility_check);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
@@ -1593,6 +1651,18 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
+ // Try to follow an existing transition to a field with attributes NONE. The
+ // return value indicates whether the transition was successful.
+ static inline bool TryTransitionToField(Handle<JSObject> object,
+ Handle<String> key);
+
+ inline int LastAddedFieldIndex();
+
+ // Extend the receiver with a single fast property that first appears in
+ // the passed map. This also extends the property backing store if
+ // necessary.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
+ inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
+
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
String* key,
@@ -1622,6 +1692,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
+
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
@@ -1638,16 +1710,28 @@ class JSObject: public JSReceiver {
LookupResult* result,
String* name,
bool continue_search);
+ PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
+ uint32_t index,
+ bool continue_search);
static void DefineAccessor(Handle<JSObject> object,
Handle<String> name,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
Object* getter,
Object* setter,
PropertyAttributes attributes);
+ // Try to define a single accessor paying attention to map transitions.
+ // Returns a JavaScript null if this was not possible and we have to use the
+ // slow case. Note that we can fail due to allocations, too.
+ MUST_USE_RESULT MaybeObject* DefineFastAccessor(
+ String* name,
+ AccessorComponent component,
+ Object* accessor,
+ PropertyAttributes attributes);
Object* LookupAccessor(String* name, AccessorComponent component);
MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
@@ -1659,15 +1743,15 @@ class JSObject: public JSReceiver {
String* name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
- JSReceiver* receiver,
+ Object* receiver,
String* name,
PropertyAttributes* attributes);
@@ -1707,16 +1791,17 @@ class JSObject: public JSReceiver {
static int GetIdentityHash(Handle<JSObject> obj);
MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
+ MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
static Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> name);
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
- inline void ValidateSmiOnlyElements();
+ inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
@@ -1728,6 +1813,7 @@ class JSObject: public JSReceiver {
EnsureElementsMode mode);
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
FixedArrayBase* elements,
+ uint32_t length,
EnsureElementsMode mode);
MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
Arguments* arguments,
@@ -1748,9 +1834,6 @@ class JSObject: public JSReceiver {
// be represented as a double and not a Smi.
bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
- // Tells whether the index'th element is present.
- bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
-
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
// (old_capacity + 50%) + 16
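
Worked numbers for the growth comment above, assuming the formula is exactly (old + old/2) + 16:

// new_capacity = old_capacity + old_capacity / 2 + 16
//   0   -> 16   (the additive floor dominates for tiny arrays)
//   16  -> 40
//   100 -> 166  (~1.5x growth dominates for large arrays)
static int GrownCapacity(int old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;
}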
@@ -1775,9 +1858,7 @@ class JSObject: public JSReceiver {
DICTIONARY_ELEMENT
};
- LocalElementType HasLocalElement(uint32_t index);
-
- bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+ LocalElementType GetLocalElementType(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
@@ -1826,10 +1907,10 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
uint32_t index);
- enum SetFastElementsCapacityMode {
- kAllowSmiOnlyElements,
- kForceSmiOnlyElements,
- kDontAllowSmiOnlyElements
+ enum SetFastElementsCapacitySmiMode {
+ kAllowSmiElements,
+ kForceSmiElements,
+ kDontAllowSmiElements
};
// Replace the elements' backing store with fast elements of the given
@@ -1838,7 +1919,7 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
int capacity,
int length,
- SetFastElementsCapacityMode set_capacity_mode);
+ SetFastElementsCapacitySmiMode smi_mode);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -1870,10 +1951,9 @@ class JSObject: public JSReceiver {
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
- void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
- void LookupCallback(String* name, LookupResult* result);
+ void LookupCallbackProperty(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
@@ -1901,7 +1981,8 @@ class JSObject: public JSReceiver {
// new_map.
MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
String* name,
- Object* value);
+ Object* value,
+ int field_index);
// Add a constant function property to a fast-case object.
// This leaves a CONSTANT_TRANSITION in the old map, and
@@ -1934,39 +2015,40 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
- // Converts the descriptor on the original object's map to a
- // map transition, and the the new field is on the object's new map.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToFieldAndMapTransition(
+ // Replaces an existing transition with a transition to a map with a FIELD.
+ MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
+ int transition_index,
String* name,
Object* new_value,
PropertyAttributes attributes);
- // Converts a descriptor of any other type to a real field,
- // backed by the properties array. Descriptors of visible
- // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+ // Converts a descriptor of any other type to a real field, backed by the
+ // properties array.
MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
String* name,
Object* new_value,
PropertyAttributes attributes);
// Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(String* name,
- Object* value,
- PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* AddFastProperty(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name,
Object* value,
PropertyAttributes attributes);
- // Add a property to an object.
- MUST_USE_RESULT MaybeObject* AddProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ // Add a property to an object. May cause GC.
+ MUST_USE_RESULT MaybeObject* AddProperty(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2041,9 +2123,7 @@ class JSObject: public JSReceiver {
}
void JSObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
inline void PrintProperties() {
PrintProperties(stdout);
@@ -2054,6 +2134,10 @@ class JSObject: public JSReceiver {
PrintElements(stdout);
}
void PrintElements(FILE* out);
+ inline void PrintTransitions() {
+ PrintTransitions(stdout);
+ }
+ void PrintTransitions(FILE* out);
#endif
void PrintElementsTransition(
@@ -2086,7 +2170,7 @@ class JSObject: public JSReceiver {
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
- inline int MaxFastProperties();
+ inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -2108,7 +2192,8 @@ class JSObject: public JSReceiver {
static const int kMaxUncheckedOldFastElementsLength = 500;
static const int kInitialMaxFastElementArray = 100000;
- static const int kMaxFastProperties = 12;
+ static const int kFastPropertiesSoftLimit = 12;
+ static const int kMaxFastProperties = 64;
static const int kMaxInstanceSize = 255 * kPointerSize;
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
@@ -2127,6 +2212,15 @@ class JSObject: public JSReceiver {
static inline int SizeOf(Map* map, HeapObject* object);
};
+ // Enqueue change record for Object.observe. May cause GC.
+ static void EnqueueChangeRecord(Handle<JSObject> object,
+ const char* type,
+ Handle<String> name,
+ Handle<Object> old_value);
+
+ // Deliver change records to observers. May cause GC.
+ static void DeliverChangeRecords(Isolate* isolate);
+
private:
friend class DictionaryElementsAccessor;
@@ -2134,6 +2228,14 @@ class JSObject: public JSReceiver {
Object* structure,
uint32_t index,
Object* holder);
+ MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
+ JSReceiver* receiver,
+ uint32_t index,
+ bool continue_search);
+ MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
+ JSReceiver* receiver,
+ uint32_t index,
+ bool continue_search);
MUST_USE_RESULT MaybeObject* SetElementWithCallback(
Object* structure,
uint32_t index,
@@ -2155,17 +2257,16 @@ class JSObject: public JSReceiver {
bool check_prototype,
SetPropertyMode set_mode);
- // Searches the prototype chain for a callback setter and sets the property
- // with the setter if it finds one. The '*found' flag indicates whether
- // a setter was found or not.
- // This function can cause GC and can return a failure result with
- // '*found==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+ // Searches the prototype chain for property 'name'. If it is found and
+ // has a setter, invoke it and set '*done' to true. If it is found and is
+ // read-only, reject and set '*done' to true. Otherwise, set '*done' to
+ // false. Can cause GC and can return a failure result with '*done==true'.
+ MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
String* name,
Object* value,
PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ bool* done);
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
@@ -2207,18 +2308,23 @@ class JSObject: public JSReceiver {
Object* getter,
Object* setter,
PropertyAttributes attributes);
- void LookupInDescriptor(String* name, LookupResult* result);
-
- // Returns the hidden properties backing store object, currently
- // a StringDictionary, stored on this object.
- // If no hidden properties object has been put on this object,
- // return undefined, unless create_if_absent is true, in which case
- // a new dictionary is created, added to this object, and returned.
- MUST_USE_RESULT MaybeObject* GetHiddenPropertiesDictionary(
- bool create_if_absent);
- // Updates the existing hidden properties dictionary.
- MUST_USE_RESULT MaybeObject* SetHiddenPropertiesDictionary(
- StringDictionary* dictionary);
+
+
+ enum InitializeHiddenProperties {
+ CREATE_NEW_IF_ABSENT,
+ ONLY_RETURN_INLINE_VALUE
+ };
+
+ // If init_option is CREATE_NEW_IF_ABSENT, return the hash table backing
+ // store for hidden properties. If there is no backing store, allocate one.
+ // If init_option is ONLY_RETURN_INLINE_VALUE, return the hash table
+ // backing store or the inline-stored identity hash, whichever is found.
+ MUST_USE_RESULT MaybeObject* GetHiddenPropertiesHashTable(
+ InitializeHiddenProperties init_option);
+ // Set the hidden property backing store to either a hash table or
+ // the inline-stored identity hash.
+ MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
+ Object* value);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2242,6 +2348,8 @@ class FixedArrayBase: public HeapObject {
class FixedDoubleArray;
+class IncrementalMarking;
+
// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public FixedArrayBase {
@@ -2314,8 +2422,8 @@ class FixedArray: public FixedArrayBase {
}
void FixedArrayPrint(FILE* out);
#endif
+ DECLARE_VERIFIER(FixedArray)
#ifdef DEBUG
- void FixedArrayVerify();
// Checks if two FixedArrays have identical contents.
bool IsEqualTo(FixedArray* other);
#endif
@@ -2401,34 +2509,40 @@ class FixedDoubleArray: public FixedArrayBase {
}
void FixedDoubleArrayPrint(FILE* out);
#endif
-
-#ifdef DEBUG
- void FixedDoubleArrayVerify();
-#endif
+ DECLARE_VERIFIER(FixedDoubleArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
};
-class IncrementalMarking;
-
-
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
-// TODO(1399): It should be possible to make room for bit_field3 in the map
-// without overloading the instance descriptors field in the map
-// (and storing it in the DescriptorArray when the map has one).
-// [0]: storage for bit_field3 for Map owning this object (Smi)
-// [1]: point to a fixed array with (value, detail) pairs.
-// [2]: next enumeration index (Smi), or pointer to small fixed array:
-// [0]: next enumeration index (Smi)
-// [1]: pointer to fixed array with enum cache
-// [3]: first key
-// [length() - 1]: last key
-//
+// [0]: Number of descriptors
+// [1]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
+// [0]: pointer to fixed array with enum cache
+// [1]: either Smi(0) or pointer to fixed array with indices
+// [2]: first key
+// [2 + number of descriptors * kDescriptorSize]: start of slack
class DescriptorArray: public FixedArray {
public:
+ // WhitenessWitness is used to prove that a descriptor array is white
+ // (unmarked), so incremental write barriers can be skipped because the
+ // marking invariant cannot be broken and slots pointing into evacuation
+ // candidates will be discovered when the object is scanned. A witness is
+ // always stack-allocated right after creating an array. By allocating a
+ // witness, incremental marking is globally disabled. The witness is then
+ // passed along wherever needed to statically prove that the array is known to
+ // be white.
+ class WhitenessWitness {
+ public:
+ inline explicit WhitenessWitness(FixedArray* array);
+ inline ~WhitenessWitness();
+
+ private:
+ IncrementalMarking* marking_;
+ };
+
// Returns true for both shared empty_descriptor_array and for smis, which the
// map uses to encode additional bit fields when the descriptor array is not
// yet used.
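
The witness is an RAII guard around incremental marking; a minimal analogue under that assumption (the real constructor takes the FixedArray and consults its heap's IncrementalMarking, per the member above):

// Illustrative guard: disable a flag on construction, restore it on exit.
class ScopedNoIncrementalMarking {
 public:
  explicit ScopedNoIncrementalMarking(bool* enabled)
      : enabled_(enabled), previous_(*enabled) {
    *enabled_ = false;  // array proven white; incremental barriers skipped
  }
  ~ScopedNoIncrementalMarking() { *enabled_ = previous_; }

 private:
  bool* enabled_;
  bool previous_;
};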
@@ -2436,43 +2550,58 @@ class DescriptorArray: public FixedArray {
// Returns the number of descriptors in the array.
int number_of_descriptors() {
- ASSERT(length() > kFirstIndex || IsEmpty());
+ ASSERT(length() >= kFirstIndex || IsEmpty());
int len = length();
- return len <= kFirstIndex ? 0 : len - kFirstIndex;
+ return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
}
- int NextEnumerationIndex() {
- if (IsEmpty()) return PropertyDetails::kInitialIndex;
- Object* obj = get(kEnumerationIndexIndex);
- if (obj->IsSmi()) {
- return Smi::cast(obj)->value();
- } else {
- Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeEnumIndex);
- return Smi::cast(index)->value();
- }
+ int number_of_descriptors_storage() {
+ int len = length();
+ return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
}
- // Set next enumeration index and flush any enum cache.
- void SetNextEnumerationIndex(int value) {
- if (!IsEmpty()) {
- set(kEnumerationIndexIndex, Smi::FromInt(value));
- }
+ int NumberOfSlackDescriptors() {
+ return number_of_descriptors_storage() - number_of_descriptors();
}
+
+ inline void SetNumberOfDescriptors(int number_of_descriptors);
+ inline int number_of_entries() { return number_of_descriptors(); }
+
bool HasEnumCache() {
- return !IsEmpty() && !get(kEnumerationIndexIndex)->IsSmi();
+ return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
+ }
+
+ void CopyEnumCacheFrom(DescriptorArray* array) {
+ set(kEnumCacheIndex, array->get(kEnumCacheIndex));
+ }
+
+ FixedArray* GetEnumCache() {
+ ASSERT(HasEnumCache());
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
+ }
+
+ bool HasEnumIndicesCache() {
+ if (IsEmpty()) return false;
+ Object* object = get(kEnumCacheIndex);
+ if (object->IsSmi()) return false;
+ FixedArray* bridge = FixedArray::cast(object);
+ return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
+ }
+
+ FixedArray* GetEnumIndicesCache() {
+ ASSERT(HasEnumIndicesCache());
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
}
- Object* GetEnumCache() {
+ Object** GetEnumCacheSlot() {
ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumerationIndexIndex));
- return bridge->get(kEnumCacheBridgeCacheIndex);
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kEnumCacheOffset);
}
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field in the map
- // (and storing it in the DescriptorArray when the map has one).
- inline int bit_field3_storage();
- inline void set_bit_field3_storage(int value);
+ void ClearEnumCache();
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
@@ -2482,92 +2611,56 @@ class DescriptorArray: public FixedArray {
// Accessors for fetching instance descriptor at descriptor number.
inline String* GetKey(int descriptor_number);
+ inline Object** GetKeySlot(int descriptor_number);
inline Object* GetValue(int descriptor_number);
+ inline Object** GetValueSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
inline JSFunction* GetConstantFunction(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
- inline bool IsProperty(int descriptor_number);
- inline bool IsTransitionOnly(int descriptor_number);
- inline bool IsNullDescriptor(int descriptor_number);
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(DescriptorArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
+ inline String* GetSortedKey(int descriptor_number);
+ inline int GetSortedKeyIndex(int descriptor_number);
+ inline void SetSortedKey(int pointer, int descriptor_number);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
inline void Set(int descriptor_number,
Descriptor* desc,
const WhitenessWitness&);
+ inline void Set(int descriptor_number, Descriptor* desc);
- // Transfer a complete descriptor from the src descriptor array to the dst
- // one, dropping map transitions in CALLBACKS.
- static void CopyFrom(Handle<DescriptorArray> dst,
- int dst_index,
- Handle<DescriptorArray> src,
- int src_index,
- const WhitenessWitness& witness);
+ // Append automatically sets the enumeration index. This should only be used
+ // to add descriptors in bulk at the end, followed by sorting the descriptor
+ // array.
+ inline void Append(Descriptor* desc, const WhitenessWitness&);
+ inline void Append(Descriptor* desc);
// Transfer a complete descriptor from the src descriptor array to this
- // descriptor array, dropping map transitions in CALLBACKS.
- MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness&);
-
- // Copy the descriptor array, insert a new descriptor and optionally
- // remove map transitions. If the descriptor is already present, it is
- // replaced. If a replaced descriptor is a real property (not a transition
- // or null), its enumeration index is kept as is.
- // If adding a real property, map transitions must be removed. If adding
- // a transition, they must not be removed. All null descriptors are removed.
- MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
- TransitionFlag transition_flag);
-
- // Return a copy of the array with all transitions and null descriptors
- // removed. Return a Failure object in case of an allocation failure.
- MUST_USE_RESULT MaybeObject* RemoveTransitions();
+ // descriptor array.
+ void CopyFrom(int dst_index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness&);
- // Sort the instance descriptors by the hash codes of their keys.
- // Does not check for duplicates.
- void SortUnchecked(const WhitenessWitness&);
+ MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
// Sort the instance descriptors by the hash codes of their keys.
- // Checks the result for duplicates.
- void Sort(const WhitenessWitness&);
+ void Sort();
// Search the instance descriptors for given name.
- inline int Search(String* name);
+ INLINE(int Search(String* name, int number_of_own_descriptors));
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- inline int SearchWithCache(String* name);
-
- // Tells whether the name is present int the array.
- bool Contains(String* name) { return kNotFound != Search(name); }
-
- // Perform a binary search in the instance descriptors represented
- // by this fixed array. low and high are descriptor indices. If there
- // are three instance descriptors in this array it should be called
- // with low=0 and high=2.
- int BinarySearch(String* name, int low, int high);
-
- // Perform a linear search in the instance descriptors represented
- // by this fixed array. len is the number of descriptor indices that are
- // valid. Does not require the descriptors to be sorted.
- int LinearSearch(String* name, int len);
+ INLINE(int SearchWithCache(String* name, Map* map));
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors);
+ MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
+ int slack = 0);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -2575,27 +2668,28 @@ class DescriptorArray: public FixedArray {
// Constant for denoting key was not found.
static const int kNotFound = -1;
- static const int kBitField3StorageIndex = 0;
- static const int kContentArrayIndex = 1;
- static const int kEnumerationIndexIndex = 2;
- static const int kFirstIndex = 3;
+ static const int kDescriptorLengthIndex = 0;
+ static const int kEnumCacheIndex = 1;
+ static const int kFirstIndex = 2;
// The length of the "bridge" to the enum cache.
- static const int kEnumCacheBridgeLength = 3;
- static const int kEnumCacheBridgeEnumIndex = 0;
- static const int kEnumCacheBridgeCacheIndex = 1;
- static const int kEnumCacheBridgeIndicesCacheIndex = 2;
+ static const int kEnumCacheBridgeLength = 2;
+ static const int kEnumCacheBridgeCacheIndex = 0;
+ static const int kEnumCacheBridgeIndicesCacheIndex = 1;
// Layout description.
- static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
- static const int kContentArrayOffset = kBitField3StorageOffset + kPointerSize;
- static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
- static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
+ static const int kDescriptorLengthOffset = FixedArray::kHeaderSize;
+ static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize;
+ static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
// Layout description for the bridge array.
- static const int kEnumCacheBridgeEnumOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheBridgeCacheOffset =
- kEnumCacheBridgeEnumOffset + kPointerSize;
+ static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
+
+ // Layout of descriptor.
+ static const int kDescriptorKey = 0;
+ static const int kDescriptorDetails = 1;
+ static const int kDescriptorValue = 2;
+ static const int kDescriptorSize = 3;
#ifdef OBJECT_PRINT
// Print all the descriptors.
@@ -2607,7 +2701,7 @@ class DescriptorArray: public FixedArray {
#ifdef DEBUG
// Is the descriptor array sorted and without duplicates?
- bool IsSortedNoDuplicates();
+ bool IsSortedNoDuplicates(int valid_descriptors = -1);
// Is the descriptor array consistent with the back pointers in targets?
bool IsConsistentWithBackPointers(Map* current_map);
@@ -2620,6 +2714,12 @@ class DescriptorArray: public FixedArray {
// fit in a page).
static const int kMaxNumberOfDescriptors = 1024 + 512;
+ // Returns the fixed array length required to hold number_of_descriptors
+ // descriptors.
+ static int LengthFor(int number_of_descriptors) {
+ return ToKeyIndex(number_of_descriptors);
+ }
+
private:
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
@@ -2637,32 +2737,40 @@ class DescriptorArray: public FixedArray {
// Conversion from descriptor number to array indices.
static int ToKeyIndex(int descriptor_number) {
- return descriptor_number+kFirstIndex;
+ return kFirstIndex +
+ (descriptor_number * kDescriptorSize) +
+ kDescriptorKey;
}
static int ToDetailsIndex(int descriptor_number) {
- return (descriptor_number << 1) + 1;
+ return kFirstIndex +
+ (descriptor_number * kDescriptorSize) +
+ kDescriptorDetails;
}
static int ToValueIndex(int descriptor_number) {
- return descriptor_number << 1;
+ return kFirstIndex +
+ (descriptor_number * kDescriptorSize) +
+ kDescriptorValue;
}
- // Swap operation on FixedArray without using write barriers.
- static inline void NoIncrementalWriteBarrierSwap(
- FixedArray* array, int first, int second);
-
- // Swap descriptor first and second.
- inline void NoIncrementalWriteBarrierSwapDescriptors(
- int first, int second);
+ // Swap first and second descriptor.
+ inline void SwapSortedKeys(int first, int second);
- FixedArray* GetContentArray() {
- return FixedArray::cast(get(kContentArrayIndex));
- }
DISALLOW_IMPLICIT_CONSTRUCTORS(DescriptorArray);
};
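
Worked layout arithmetic for the helpers above, with kFirstIndex == 2 and kDescriptorSize == 3:

// Descriptor n occupies three consecutive slots after the two header slots:
//   ToKeyIndex(n)     == 2 + 3 * n       (key)
//   ToDetailsIndex(n) == 2 + 3 * n + 1   (details)
//   ToValueIndex(n)   == 2 + 3 * n + 2   (value)
// so descriptor 0 lives in slots 2..4, descriptor 1 in slots 5..7, and
// LengthFor(n) == ToKeyIndex(n) == 2 + 3 * n slots hold n descriptors.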
+enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
+
+template<SearchMode search_mode, typename T>
+inline int LinearSearch(T* array, String* name, int len, int valid_entries);
+
+
+template<SearchMode search_mode, typename T>
+inline int Search(T* array, String* name, int valid_entries = 0);
+
+
// HashTable is a subclass of FixedArray that implements a hash table
// that uses open addressing and quadratic probing.
//
@@ -2715,6 +2823,11 @@ class BaseShape {
template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
+ enum MinimumCapacity {
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_CUSTOM_MINIMUM_CAPACITY
+ };
+
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
@@ -2767,6 +2880,7 @@ class HashTable: public FixedArray {
// Returns a new HashTable object. Might return Failure.
MUST_USE_RESULT static MaybeObject* Allocate(
int at_least_space_for,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
PretenureFlag pretenure = NOT_TENURED);
// Computes the required capacity for a table holding the given
@@ -2856,11 +2970,12 @@ class HashTable: public FixedArray {
return (hash + GetProbeOffset(number)) & (size - 1);
}
- static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
+ inline static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
return hash & (size - 1);
}
- static uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
+ inline static uint32_t NextProbe(
+ uint32_t last, uint32_t number, uint32_t size) {
return (last + number) & (size - 1);
}
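
Because NextProbe adds the probe number to the previous slot, the offsets from the initial slot are the triangular numbers 0, 1, 3, 6, 10, ... — this is the quadratic probing the class comment above mentions. A self-contained demonstration:

#include <stdint.h>

uint32_t FirstProbeEx(uint32_t hash, uint32_t size) {
  return hash & (size - 1);                 // size is a power of two
}
uint32_t NextProbeEx(uint32_t last, uint32_t number, uint32_t size) {
  return (last + number) & (size - 1);
}
// probe(0) = hash, probe(1) = hash + 1, probe(2) = hash + 3,
// probe(3) = hash + 6, ... (all mod size): offset n is n * (n + 1) / 2.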
@@ -2947,6 +3062,8 @@ class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
private:
MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
+ template <bool seq_ascii> friend class JsonParser;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
};
@@ -3044,6 +3161,7 @@ class Dictionary: public HashTable<Shape, Key> {
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
+ ASSERT(index != 0);
this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
}
@@ -3117,7 +3235,9 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
}
// Copies enumerable keys to preallocated fixed array.
- void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array);
+ FixedArray* CopyEnumKeysTo(FixedArray* storage);
+ static void DoGenerateNewEnumerationIndices(
+ Handle<StringDictionary> dictionary);
// For transforming properties of a JSObject.
MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor(
@@ -3127,8 +3247,6 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
// Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
int FindEntry(String* key);
-
- bool ContainsTransition(int entry);
};
@@ -3274,12 +3392,12 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
return reinterpret_cast<ObjectHashTable*>(obj);
}
- // Looks up the value associated with the given key. The undefined value is
+ // Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Object* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the undefined value causes removal of the whole entry.
+ // key to the hole value causes removal of the whole entry.
MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
private:
@@ -3327,9 +3445,7 @@ class JSFunctionResultCache: public FixedArray {
// Casting
static inline JSFunctionResultCache* cast(Object* obj);
-#ifdef DEBUG
- void JSFunctionResultCacheVerify();
-#endif
+ DECLARE_VERIFIER(JSFunctionResultCache)
};
@@ -3436,7 +3552,7 @@ class ScopeInfo : public FixedArray {
// must be a symbol (canonicalized).
int FunctionContextSlotIndex(String* name, VariableMode* mode);
- static Handle<ScopeInfo> Create(Scope* scope);
+ static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
// Serializes empty scope info.
static ScopeInfo* Empty();
@@ -3482,7 +3598,7 @@ class ScopeInfo : public FixedArray {
FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
#undef DECL_INDEX
#undef FOR_EACH_NUMERIC_FIELD
- kVariablePartIndex
+ kVariablePartIndex
};
// The layout of the variable part of a ScopeInfo is as follows:
@@ -3556,9 +3672,7 @@ class NormalizedMapCache: public FixedArray {
// Casting
static inline NormalizedMapCache* cast(Object* obj);
-#ifdef DEBUG
- void NormalizedMapCacheVerify();
-#endif
+ DECLARE_VERIFIER(NormalizedMapCache)
};
@@ -3607,9 +3721,7 @@ class ByteArray: public FixedArrayBase {
}
void ByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ByteArrayVerify();
-#endif
+ DECLARE_VERIFIER(ByteArray)
// Layout description.
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
@@ -3643,9 +3755,7 @@ class FreeSpace: public HeapObject {
}
void FreeSpacePrint(FILE* out);
#endif
-#ifdef DEBUG
- void FreeSpaceVerify();
-#endif
+ DECLARE_VERIFIER(FreeSpace)
// Layout description.
// Size is smi tagged when it is stored.
@@ -3725,9 +3835,7 @@ class ExternalPixelArray: public ExternalArray {
}
void ExternalPixelArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalPixelArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalPixelArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalPixelArray);
@@ -3754,9 +3862,7 @@ class ExternalByteArray: public ExternalArray {
}
void ExternalByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalByteArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalByteArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
@@ -3783,9 +3889,7 @@ class ExternalUnsignedByteArray: public ExternalArray {
}
void ExternalUnsignedByteArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedByteArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedByteArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
@@ -3812,9 +3916,7 @@ class ExternalShortArray: public ExternalArray {
}
void ExternalShortArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalShortArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalShortArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
@@ -3841,9 +3943,7 @@ class ExternalUnsignedShortArray: public ExternalArray {
}
void ExternalUnsignedShortArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedShortArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedShortArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
@@ -3870,9 +3970,7 @@ class ExternalIntArray: public ExternalArray {
}
void ExternalIntArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalIntArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalIntArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
@@ -3899,9 +3997,7 @@ class ExternalUnsignedIntArray: public ExternalArray {
}
void ExternalUnsignedIntArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalUnsignedIntArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalUnsignedIntArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
@@ -3928,9 +4024,7 @@ class ExternalFloatArray: public ExternalArray {
}
void ExternalFloatArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ExternalFloatArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalFloatArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
@@ -3957,9 +4051,7 @@ class ExternalDoubleArray: public ExternalArray {
}
void ExternalDoubleArrayPrint(FILE* out);
#endif // OBJECT_PRINT
-#ifdef DEBUG
- void ExternalDoubleArrayVerify();
-#endif // DEBUG
+ DECLARE_VERIFIER(ExternalDoubleArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
@@ -3984,7 +4076,7 @@ class DeoptimizationInputData: public FixedArray {
static const int kFirstDeoptEntryIndex = 5;
// Offsets of deopt entry elements relative to the start of the entry.
- static const int kAstIdOffset = 0;
+ static const int kAstIdRawOffset = 0;
static const int kTranslationIndexOffset = 1;
static const int kArgumentsStackHeightOffset = 2;
static const int kPcOffset = 3;
@@ -4016,13 +4108,21 @@ class DeoptimizationInputData: public FixedArray {
set(IndexForEntry(i) + k##name##Offset, value); \
}
- DEFINE_ENTRY_ACCESSORS(AstId, Smi)
+ DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
DEFINE_ENTRY_ACCESSORS(Pc, Smi)
#undef DEFINE_ENTRY_ACCESSORS
+ BailoutId AstId(int i) {
+ return BailoutId(AstIdRaw(i)->value());
+ }
+
+ void SetAstId(int i, BailoutId value) {
+ SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
+ }
+
int DeoptCount() {
return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
}
@@ -4057,8 +4157,15 @@ class DeoptimizationInputData: public FixedArray {
class DeoptimizationOutputData: public FixedArray {
public:
int DeoptPoints() { return length() / 2; }
- Smi* AstId(int index) { return Smi::cast(get(index * 2)); }
- void SetAstId(int index, Smi* id) { set(index * 2, id); }
+
+ BailoutId AstId(int index) {
+ return BailoutId(Smi::cast(get(index * 2))->value());
+ }
+
+ void SetAstId(int index, BailoutId id) {
+ set(index * 2, Smi::FromInt(id.ToInt()));
+ }
+
Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
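
Each deopt point therefore occupies two consecutive slots, the BailoutId at 2 * index and the packed pc-and-state Smi right after it. A trivial sketch of the addressing:

// Sketch: two slots per deopt point, mirroring the accessors above.
int AstIdSlot(int index)      { return index * 2; }      // BailoutId Smi
int PcAndStateSlot(int index) { return 1 + index * 2; }  // packed pc/state
// Entry 3 occupies slots 6 and 7; DeoptPoints() == length() / 2.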
@@ -4093,8 +4200,8 @@ class TypeFeedbackCells: public FixedArray {
static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
// Accessors for AST ids associated with cache values.
- inline Smi* AstId(int index);
- inline void SetAstId(int index, Smi* id);
+ inline TypeFeedbackId AstId(int index);
+ inline void SetAstId(int index, TypeFeedbackId id);
// Accessors for global property cells holding the cache values.
inline JSGlobalPropertyCell* Cell(int index);
@@ -4134,30 +4241,49 @@ class Code: public HeapObject {
FLAGS_MAX_VALUE = kMaxInt
};
+#define CODE_KIND_LIST(V) \
+ V(FUNCTION) \
+ V(OPTIMIZED_FUNCTION) \
+ V(STUB) \
+ V(BUILTIN) \
+ V(LOAD_IC) \
+ V(KEYED_LOAD_IC) \
+ V(CALL_IC) \
+ V(KEYED_CALL_IC) \
+ V(STORE_IC) \
+ V(KEYED_STORE_IC) \
+ V(UNARY_OP_IC) \
+ V(BINARY_OP_IC) \
+ V(COMPARE_IC) \
+ V(TO_BOOLEAN_IC)
+
enum Kind {
- FUNCTION,
- OPTIMIZED_FUNCTION,
- STUB,
- BUILTIN,
- LOAD_IC,
- KEYED_LOAD_IC,
- CALL_IC,
- KEYED_CALL_IC,
- STORE_IC,
- KEYED_STORE_IC,
- UNARY_OP_IC,
- BINARY_OP_IC,
- COMPARE_IC,
- TO_BOOLEAN_IC,
- // No more than 16 kinds. The value currently encoded in four bits in
- // Flags.
+#define DEFINE_CODE_KIND_ENUM(name) name,
+ CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
// Pseudo-kinds.
+ LAST_CODE_KIND = TO_BOOLEAN_IC,
REGEXP = BUILTIN,
FIRST_IC_KIND = LOAD_IC,
LAST_IC_KIND = TO_BOOLEAN_IC
};
+ // No more than 16 kinds. The value is currently encoded in four bits in
+ // Flags.
+ STATIC_ASSERT(LAST_CODE_KIND < 16);
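
CODE_KIND_LIST is a classic X-macro: the same list expands once into the enum and can expand again into parallel tables (such as the strings behind Kind2String), so the two can never drift apart. A generic sketch of the pattern with a shortened, hypothetical list:

// Sketch of the X-macro pattern used above, with a shortened list.
#define KIND_LIST(V) V(FUNCTION) V(STUB) V(BUILTIN)

enum Kind {
#define DEFINE_ENUM(name) name,
  KIND_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
  NUMBER_OF_KINDS
};

// The same list expands a second time into a name table, so the enum
// values and their printable names stay in sync automatically.
static const char* kKindNames[] = {
#define DEFINE_NAME(name) #name,
  KIND_LIST(DEFINE_NAME)
#undef DEFINE_NAME
};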
+
+ // Types of stubs.
+ enum StubType {
+ NORMAL,
+ FIELD,
+ CONSTANT_FUNCTION,
+ CALLBACKS,
+ INTERCEPTOR,
+ MAP_TRANSITION,
+ NONEXISTENT
+ };
+
enum {
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
@@ -4170,7 +4296,7 @@ class Code: public HeapObject {
// Printing
static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
- static const char* PropertyType2String(PropertyType type);
+ static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
inline void Disassemble(const char* name) {
Disassemble(name, stdout);
@@ -4220,7 +4346,7 @@ class Code: public HeapObject {
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline PropertyType type(); // Only valid for monomorphic IC stubs.
+ inline StubType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
@@ -4324,6 +4450,8 @@ class Code: public HeapObject {
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
+ bool allowed_in_shared_map_code_cache();
+
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -4361,19 +4489,19 @@ class Code: public HeapObject {
Kind kind,
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
- PropertyType type = NORMAL,
+ StubType type = NORMAL,
int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
- PropertyType type,
+ StubType type,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
int argc = -1);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
- static inline PropertyType ExtractTypeFromFlags(Flags flags);
+ static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
@@ -4446,12 +4574,28 @@ class Code: public HeapObject {
}
void CodePrint(FILE* out);
#endif
-#ifdef DEBUG
- void CodeVerify();
-#endif
+ DECLARE_VERIFIER(Code)
+
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
+#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
+ enum Age {
+ kNoAge = 0,
+ CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
+ kAfterLastCodeAge,
+ kLastCodeAge = kAfterLastCodeAge - 1,
+ kCodeAgeCount = kAfterLastCodeAge - 1
+ };
+#undef DECLARE_CODE_AGE_ENUM
+
+ // Code aging
+ static void MakeCodeAgeSequenceYoung(byte* sequence);
+ void MakeYoung();
+ void MakeOlder(MarkingParity);
+ static bool IsYoungSequence(byte* sequence);
+ bool IsOld();
+

// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -4468,28 +4612,20 @@ class Code: public HeapObject {
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
- static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
- static const int kKindSpecificFlagsSize = 2 * kIntSize;
+ static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
+ static const int kKindSpecificFlags2Offset =
+ kKindSpecificFlags1Offset + kIntSize;
- static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
- kKindSpecificFlagsSize;
+ static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
// Add padding to align the instruction start, which follows right after
// the Code object header.
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
- // Byte offsets within kKindSpecificFlagsOffset.
- static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
- static const int kOptimizableOffset = kKindSpecificFlagsOffset;
- static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
- static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
-
- static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
- static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
- static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
- static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
- static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
+ // Byte offsets within kKindSpecificFlags1Offset.
+ static const int kOptimizableOffset = kKindSpecificFlags1Offset;
+ static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -4497,26 +4633,90 @@ class Code: public HeapObject {
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
- static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
-
- static const int kCompareOperationOffset = kCompareStateOffset + 1;
-
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
- static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
-
// Flags layout. BitField<type, shift, size>.
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
- class TypeField: public BitField<PropertyType, 3, 4> {};
- class CacheHolderField: public BitField<InlineCacheHolderFlag, 7, 1> {};
- class KindField: public BitField<Kind, 8, 4> {};
- class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
- class IsPregeneratedField: public BitField<bool, 14, 1> {};
+ class TypeField: public BitField<StubType, 3, 3> {};
+ class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
+ class KindField: public BitField<Kind, 7, 4> {};
+ class ExtraICStateField: public BitField<ExtraICState, 11, 2> {};
+ class IsPregeneratedField: public BitField<bool, 13, 1> {};
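
Each of these classes instantiates V8's BitField<type, shift, size> template, which turns the flags word into a set of typed accessors. A simplified sketch of what such a helper provides (not the real implementation):

#include <stdint.h>

// Simplified sketch of the BitField<T, shift, size> helper used above.
template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
  static uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | encode(value);
  }
};

// E.g. a 4-bit kind starting at bit 7, like KindField above:
// BitFieldSketch<int, 7, 4>::decode(flags) extracts the kind.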
+
+ // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
+ static const int kStackSlotsFirstBit = 0;
+ static const int kStackSlotsBitCount = 24;
+ static const int kUnaryOpTypeFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kUnaryOpTypeBitCount = 3;
+ static const int kBinaryOpTypeFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kBinaryOpTypeBitCount = 3;
+ static const int kBinaryOpResultTypeFirstBit =
+ kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
+ static const int kBinaryOpResultTypeBitCount = 3;
+ static const int kCompareStateFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kCompareStateBitCount = 3;
+ static const int kCompareOperationFirstBit =
+ kCompareStateFirstBit + kCompareStateBitCount;
+ static const int kCompareOperationBitCount = 4;
+ static const int kToBooleanStateFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kToBooleanStateBitCount = 8;
+ static const int kHasFunctionCacheFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kHasFunctionCacheBitCount = 1;
+
+ STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
+ STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
+ kBinaryOpResultTypeBitCount <= 32);
+ STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
+ STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
+ STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
+ STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+
+ class StackSlotsField: public BitField<int,
+ kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
+ class UnaryOpTypeField: public BitField<int,
+ kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpTypeField: public BitField<int,
+ kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpResultTypeField: public BitField<int,
+ kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
+ class CompareStateField: public BitField<int,
+ kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
+ class CompareOperationField: public BitField<int,
+ kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
+ class ToBooleanStateField: public BitField<int,
+ kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
+ class HasFunctionCacheField: public BitField<bool,
+ kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+
+ // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
+ static const int kStubMajorKeyFirstBit = 0;
+ static const int kSafepointTableOffsetFirstBit =
+ kStubMajorKeyFirstBit + kStubMajorKeyBits;
+ static const int kSafepointTableOffsetBitCount = 26;
+
+ STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
+ STATIC_ASSERT(kSafepointTableOffsetFirstBit +
+ kSafepointTableOffsetBitCount <= 32);
+
+ class SafepointTableOffsetField: public BitField<int,
+ kSafepointTableOffsetFirstBit,
+ kSafepointTableOffsetBitCount> {}; // NOLINT
+ class StubMajorKeyField: public BitField<int,
+ kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT
+
+ // KindSpecificFlags2 layout (FUNCTION)
+ class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
// Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 15;
+ static const int kArgumentsCountShift = 14;
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
// This constant should be encodable in an ARM instruction.
@@ -4524,6 +4724,21 @@ class Code: public HeapObject {
TypeField::kMask | CacheHolderField::kMask;
private:
+ friend class RelocIterator;
+
+ // Code aging
+ byte* FindCodeAgeSequence();
+ static void GetCodeAgeAndParity(Code* code, Age* age,
+ MarkingParity* parity);
+ static void GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity);
+ static Code* GetCodeAgeStub(Age age, MarkingParity parity);
+
+ // Code aging -- platform-specific
+ byte* FindPlatformCodeAgeSequence();
+ static void PatchPlatformCodeAge(byte* sequence, Age age,
+ MarkingParity parity);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
@@ -4566,12 +4781,20 @@ class Map: public HeapObject {
inline void set_bit_field2(byte value);
// Bit field 3.
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field (and storing it in the
- // DescriptorArray when the map has one).
inline int bit_field3();
inline void set_bit_field3(int value);
+ class EnumLengthBits: public BitField<int, 0, 11> {};
+ class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
+ class IsShared: public BitField<bool, 22, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 23, 1> {};
+ class DictionaryMap: public BitField<bool, 24, 1> {};
+ class OwnsDescriptors: public BitField<bool, 25, 1> {};
+ class IsObserved: public BitField<bool, 26, 1> {};
+ class NamedInterceptorIsFallback: public BitField<bool, 27, 1> {};
+ class HasInstanceCallHandler: public BitField<bool, 28, 1> {};
+ class AttachedToSharedFunctionInfo: public BitField<bool, 29, 1> {};
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
@@ -4630,11 +4853,11 @@ class Map: public HeapObject {
// Tells whether the instance has a call-as-function handler.
inline void set_has_instance_call_handler() {
- set_bit_field3(bit_field3() | (1 << kHasInstanceCallHandler));
+ set_bit_field3(HasInstanceCallHandler::update(bit_field3(), true));
}
inline bool has_instance_call_handler() {
- return ((1 << kHasInstanceCallHandler) & bit_field3()) != 0;
+ return HasInstanceCallHandler::decode(bit_field3());
}
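
For a one-bit flag the BitField form computes exactly what the old hand-written mask did; the win is that the bit position lives in one place. A tiny sketch of the equivalence (bit 28 matches the HasInstanceCallHandler declaration above):

#include <assert.h>
#include <stdint.h>

int main() {
  const int kBit = 28;                         // HasInstanceCallHandler's bit
  uint32_t field3 = 0;
  field3 |= (1u << kBit);                      // old-style: set via mask
  bool decoded = ((field3 >> kBit) & 1) != 0;  // what ::decode computes
  assert(decoded);
  field3 &= ~(1u << kBit);                     // what ::update(..., false) does
  assert(((field3 >> kBit) & 1) == 0);
}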
inline void set_is_extensible(bool value);
@@ -4654,17 +4877,21 @@ class Map: public HeapObject {
}
// Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_only_elements() {
- return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
+ inline bool has_fast_smi_elements() {
+ return IsFastSmiElementsKind(elements_kind());
}
// Tells whether the instance has fast elements.
- inline bool has_fast_elements() {
- return elements_kind() == FAST_ELEMENTS;
+ inline bool has_fast_object_elements() {
+ return IsFastObjectElementsKind(elements_kind());
+ }
+
+ inline bool has_fast_smi_or_object_elements() {
+ return IsFastSmiOrObjectElementsKind(elements_kind());
}
inline bool has_fast_double_elements() {
- return elements_kind() == FAST_DOUBLE_ELEMENTS;
+ return IsFastDoubleElementsKind(elements_kind());
}
inline bool has_non_strict_arguments_elements() {
@@ -4672,13 +4899,11 @@ class Map: public HeapObject {
}
inline bool has_external_array_elements() {
- ElementsKind kind(elements_kind());
- return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ return IsExternalArrayElementsKind(elements_kind());
}
inline bool has_dictionary_elements() {
- return elements_kind() == DICTIONARY_ELEMENTS;
+ return IsDictionaryElementsKind(elements_kind());
}
inline bool has_slow_elements_kind() {
@@ -4689,6 +4914,20 @@ class Map: public HeapObject {
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
+ inline bool HasTransitionArray();
+ inline bool HasElementsTransition();
+ inline Map* elements_transition_map();
+ MUST_USE_RESULT inline MaybeObject* set_elements_transition_map(
+ Map* transitioned_map);
+ inline void SetTransition(int transition_index, Map* target);
+ inline Map* GetTransition(int transition_index);
+ MUST_USE_RESULT inline MaybeObject* AddTransition(String* key,
+ Map* target,
+ SimpleTransitionFlag flag);
+ DECL_ACCESSORS(transitions, TransitionArray)
+ inline void ClearTransitions(Heap* heap,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -4699,9 +4938,15 @@ class Map: public HeapObject {
// behavior. If true, the map should never be modified, instead a clone
// should be created and modified.
inline void set_is_shared(bool value);
-
inline bool is_shared();
+ // Tells whether the map is used for JSObjects in dictionary mode (i.e.
+ // normalized objects, i.e. objects for which HasFastProperties returns false).
+ // A map can never be used for both dictionary mode and fast mode JSObjects.
+ // False by default and for HeapObjects that are not JSObjects.
+ inline void set_dictionary_map(bool value);
+ inline bool is_dictionary_map();
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -4720,7 +4965,7 @@ class Map: public HeapObject {
// comparisons involving this object
inline void set_use_user_object_comparison(bool value);
inline bool use_user_object_comparison();
-
+
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
@@ -4729,16 +4974,9 @@ class Map: public HeapObject {
inline JSFunction* unchecked_constructor();
- // Should only be called by the code that initializes map to set initial valid
- // value of the instance descriptor member.
- inline void init_instance_descriptors();
-
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
-
- // Sets the instance descriptor array for the map to be an empty descriptor
- // array.
- inline void clear_instance_descriptors();
+ inline void InitializeDescriptors(DescriptorArray* descriptors);
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
@@ -4750,6 +4988,7 @@ class Map: public HeapObject {
inline Object* GetBackPointer();
inline void SetBackPointer(Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void init_back_pointer(Object* undefined);
// [prototype transitions]: cache of prototype transitions.
// Prototype transition is a transition that happens
@@ -4759,27 +4998,29 @@ class Map: public HeapObject {
// 1: back pointer that overlaps with prototype transitions field.
// 2 + 2 * i: prototype
// 3 + 2 * i: target map
- DECL_ACCESSORS(prototype_transitions, FixedArray)
+ inline FixedArray* GetPrototypeTransitions();
+ MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions(
+ FixedArray* prototype_transitions);
+ inline bool HasPrototypeTransitions();
- inline void init_prototype_transitions(Object* undefined);
- inline HeapObject* unchecked_prototype_transitions();
+ inline HeapObject* UncheckedPrototypeTransitions();
+ inline TransitionArray* unchecked_transition_array();
- static const int kProtoTransitionHeaderSize = 2;
+ static const int kProtoTransitionHeaderSize = 1;
static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionBackPointerOffset = 1;
static const int kProtoTransitionElementsPerEntry = 2;
static const int kProtoTransitionPrototypeOffset = 0;
static const int kProtoTransitionMapOffset = 1;
inline int NumberOfProtoTransitions() {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
if (cache->length() == 0) return 0;
return
Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
}
inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = GetPrototypeTransitions();
ASSERT(cache->length() != 0);
cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
Smi::FromInt(value));
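
With the one-slot header (kProtoTransitionHeaderSize == 1) and two slots per entry, entry i of the cache lives at fixed offsets from the start of the FixedArray. A sketch of the addressing derived from the new constants:

// Sketch: slot addressing in the prototype-transitions FixedArray,
// assuming entries start right after the one-slot entry count.
int PrototypeSlot(int i) { return 1 + 2 * i + 0; }  // prototype offset 0
int TargetMapSlot(int i) { return 1 + 2 * i + 1; }  // target map offset 1
// Entry 0 -> slots 1 and 2; the Smi entry count lives in slot 0.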
@@ -4788,18 +5029,86 @@ class Map: public HeapObject {
// Lookup in the map's instance descriptors and fill out the result
// with the given holder if the name is found. The holder may be
// NULL when this function is used from the compiler.
- void LookupInDescriptors(JSObject* holder,
- String* name,
- LookupResult* result);
+ inline void LookupDescriptor(JSObject* holder,
+ String* name,
+ LookupResult* result);
+
+ inline void LookupTransition(JSObject* holder,
+ String* name,
+ LookupResult* result);
+
+ // The sizes of transition arrays are limited so they do not end up in large
+ // object space. Otherwise ClearNonLiveTransitions would leak memory while
+ // applying in-place right trimming.
+ inline bool CanHaveMoreTransitions();
+
+ int LastAdded() {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ ASSERT(number_of_own_descriptors > 0);
+ return number_of_own_descriptors - 1;
+ }
+
+ int NumberOfOwnDescriptors() {
+ return NumberOfOwnDescriptorsBits::decode(bit_field3());
+ }
+
+ void SetNumberOfOwnDescriptors(int number) {
+ ASSERT(number <= instance_descriptors()->number_of_descriptors());
+ set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
+ }
+
+ inline JSGlobalPropertyCell* RetrieveDescriptorsPointer();
+
+ int EnumLength() {
+ return EnumLengthBits::decode(bit_field3());
+ }
+
+ void SetEnumLength(int length) {
+ if (length != kInvalidEnumCache) {
+ ASSERT(length >= 0);
+ ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
+ ASSERT(length <= NumberOfOwnDescriptors());
+ }
+ set_bit_field3(EnumLengthBits::update(bit_field3(), length));
+ }
+
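SetEnumLength uses kInvalidEnumCache, defined below as EnumLengthBits::kMax, as the "not yet computed" sentinel: the all-ones value of the 11-bit field. A one-line check of that identity:

// Sketch: the sentinel is the largest value an 11-bit field can hold.
static const int kEnumLengthBitCount = 11;
static const int kInvalidEnumCacheSketch = (1 << kEnumLengthBitCount) - 1;  // 2047
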
+ inline bool owns_descriptors();
+ inline void set_owns_descriptors(bool is_shared);
+ inline bool is_observed();
+ inline void set_is_observed(bool is_observed);
+
+ MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
+ MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
+ DescriptorArray* descriptors,
+ String* name,
+ TransitionFlag flag,
+ int descriptor_index);
+ MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
+ Descriptor* descriptor);
+ MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyInsertDescriptor(Descriptor* descriptor,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyReplaceDescriptor(
+ DescriptorArray* descriptors,
+ Descriptor* descriptor,
+ int index,
+ TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
+ TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
+ inline void AppendDescriptor(Descriptor* desc,
+ const DescriptorArray::WhitenessWitness&);
+
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- MUST_USE_RESULT MaybeObject* CopyDropTransitions();
+ MUST_USE_RESULT MaybeObject* Copy();
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -4809,7 +5118,8 @@ class Map: public HeapObject {
// Returns the number of properties described in instance_descriptors
// filtering out properties with the specified attributes.
- int NumberOfDescribedProperties(PropertyAttributes filter = NONE);
+ int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
+ PropertyAttributes filter = NONE);
// Casting.
static inline Map* cast(Object* obj);
@@ -4828,6 +5138,13 @@ class Map: public HeapObject {
Handle<Code> code);
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
+ // Extend the descriptor array of the map with the list of descriptors.
+ // In case of duplicates, the latest descriptor is used.
+ static void AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors);
+
+ static void EnsureDescriptorSlack(Handle<Map> map, int slack);
+
// Returns the found code or undefined if absent.
Object* FindInCodeCache(String* name, Code::Flags flags);
@@ -4852,23 +5169,11 @@ class Map: public HeapObject {
// The "shared" flags of both this map and |other| are ignored.
bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
- // Returns the contents of this map's descriptor array for the given string.
- // May return NULL. |safe_to_add_transition| is set to false and NULL
- // is returned if adding transitions is not allowed.
- Object* GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transitions);
-
// Returns the map that this map transitions to if its elements_kind
// is changed to |elements_kind|, or NULL if no such map is cached yet.
// |safe_to_add_transitions| is set to false if adding transitions is not
// allowed.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind,
- bool* safe_to_add_transition);
-
- // Adds an entry to this map's descriptor array for a transition to
- // |transitioned_map| when its elements_kind is changed to |elements_kind|.
- MUST_USE_RESULT MaybeObject* AddElementsTransition(
- ElementsKind elements_kind, Map* transitioned_map);
+ Map* LookupElementsTransitionMap(ElementsKind elements_kind);
// Returns the transitioned map for this map with the most generic
// elements_kind that's found in |candidates|, or null handle if no match is
@@ -4876,14 +5181,14 @@ class Map: public HeapObject {
Handle<Map> FindTransitionedMap(MapHandleList* candidates);
Map* FindTransitionedMap(MapList* candidates);
- // Zaps the contents of backing data structures in debug mode. Note that the
+ // Zaps the contents of backing data structures. Note that the
// heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
// holding weak references when incremental marking is used, because it also
// iterates over objects that are otherwise unreachable.
-#ifdef DEBUG
- void ZapInstanceDescriptors();
+ // In general we only want to call these functions in release mode when
+ // heap verification is turned on.
void ZapPrototypeTransitions();
-#endif
+ void ZapTransitions();
// Dispatched behavior.
#ifdef OBJECT_PRINT
@@ -4892,8 +5197,9 @@ class Map: public HeapObject {
}
void MapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void MapVerify();
+ DECLARE_VERIFIER(Map)
+
+#ifdef VERIFY_HEAP
void SharedMapVerify();
#endif
@@ -4904,44 +5210,47 @@ class Map: public HeapObject {
void TraverseTransitionTree(TraverseCallback callback, void* data);
+ // When you set the prototype of an object using the __proto__ accessor you
+ // need a new map for the object (the prototype is stored in the map). In
+ // order not to multiply maps unnecessarily we store these as transitions in
+ // the original map. That way we can transition to the same map if the same
+ // prototype is set, rather than creating a new map every time. The
+ // transitions are in the form of a map where the keys are prototype objects
+ // and the values are the maps that are transitioned to.
static const int kMaxCachedPrototypeTransitions = 256;
- Object* GetPrototypeTransition(Object* prototype);
+ Map* GetPrototypeTransition(Object* prototype);
MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
Map* map);
static const int kMaxPreAllocatedPropertyFields = 255;
+ // Constant for denoting that the enum cache is not yet initialized.
+ static const int kInvalidEnumCache = EnumLengthBits::kMax;
+
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- // Storage for instance descriptors is overloaded to also contain additional
- // map flags when unused (bit_field3). When the map has instance descriptors,
- // the flags are transferred to the instance descriptor array and accessed
- // through an extra indirection.
- // TODO(1399): It should be possible to make room for bit_field3 in the map
- // without overloading the instance descriptors field, but the map is
- // currently perfectly aligned to 32 bytes and extending it at all would
- // double its size. After the increment GC work lands, this size restriction
- // could be loosened and bit_field3 moved directly back in the map.
- static const int kInstanceDescriptorsOrBitField3Offset =
+ // Storage for the transition array is overloaded to directly contain a back
+ // pointer if unused. When the map has transitions, the back pointer is
+ // transferred to the transition array and accessed through an extra
+ // indirection.
+ static const int kTransitionsOrBackPointerOffset =
kConstructorOffset + kPointerSize;
+ static const int kDescriptorsOffset =
+ kTransitionsOrBackPointerOffset + kPointerSize;
static const int kCodeCacheOffset =
- kInstanceDescriptorsOrBitField3Offset + kPointerSize;
- static const int kPrototypeTransitionsOrBackPointerOffset =
- kCodeCacheOffset + kPointerSize;
- static const int kPadStart =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
- static const int kSize = MAP_POINTER_ALIGN(kPadStart);
+ kDescriptorsOffset + kPointerSize;
+ static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
// being continuously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset =
- kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kPointerFieldsEndOffset = kBitField3Offset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -4974,40 +5283,33 @@ class Map: public HeapObject {
// Bit positions for bit field 2
static const int kIsExtensible = 0;
- static const int kFunctionWithPrototype = 1;
- static const int kStringWrapperSafeForDefaultValueOf = 2;
- static const int kUseUserObjectComparison = 3;
+ static const int kStringWrapperSafeForDefaultValueOf = 1;
+ static const int kUseUserObjectComparison = 2;
// No bits can be used after kElementsKindFirstBit, they are all reserved for
// storing ElementKind.
- static const int kElementsKindShift = 4;
- static const int kElementsKindBitCount = 4;
+ static const int kElementsKindShift = 3;
+ static const int kElementsKindBitCount = 5;
// Derived values from bit field 2
static const int kElementsKindMask = (-1 << kElementsKindShift) &
((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
(FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
- static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
+ static const int8_t kMaximumBitField2FastSmiElementValue =
+ static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
+ Map::kElementsKindShift) - 1;
+ static const int8_t kMaximumBitField2FastHoleyElementValue =
+ static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
+ Map::kElementsKindShift) - 1;
+ static const int8_t kMaximumBitField2FastHoleySmiElementValue =
+ static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
-
- // Bit positions for bit field 3
- static const int kIsShared = 0;
- static const int kNamedInterceptorIsFallback = 1;
- static const int kHasInstanceCallHandler = 2;
- static const int kAttachedToSharedFunctionInfo = 3;
-
- // Layout of the default cache. It holds alternating name and code objects.
- static const int kCodeCacheEntrySize = 2;
- static const int kCodeCacheEntryNameOffset = 0;
- static const int kCodeCacheEntryCodeOffset = 1;
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset,
kSize> BodyDescriptor;
private:
- String* elements_transition_sentinel_name();
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -5101,9 +5403,7 @@ class Script: public Struct {
}
void ScriptPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ScriptVerify();
-#endif
+ DECLARE_VERIFIER(Script)
static const int kSourceOffset = HeapObject::kHeaderSize;
static const int kNameOffset = kSourceOffset + kPointerSize;
@@ -5188,6 +5488,29 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [optimized_code_map]: Map from native context to optimized code
+ // and a shared literals array or Smi 0 if none.
+ DECL_ACCESSORS(optimized_code_map, Object)
+
+ // Returns index i of the entry with the specified context. At position
+ // i - 1 is the context, position i the code, and i + 1 the literals array.
+ // Returns -1 when no matching entry is found.
+ int SearchOptimizedCodeMap(Context* native_context);
+
+ // Installs optimized code from the code map on the given closure. The
+ // index has to be consistent with a search result as defined above.
+ void InstallFromOptimizedCodeMap(JSFunction* function, int index);
+
+ // Clear optimized code map.
+ void ClearOptimizedCodeMap();
+
+ // Add a new entry to the optimized code map.
+ static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<FixedArray> literals);
+ static const int kEntryLength = 3;
+
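Per that contract, the map is a flat FixedArray of (context, code, literals) triples and the index returned by SearchOptimizedCodeMap points at the code slot. An illustrative walk (plain pointers stand in for the real heap types):

// Illustrative sketch of the lookup described above: entries are
// (context, code, literals) triples; the returned index is the code slot,
// so the context sits at index - 1 and the literals at index + 1.
int SearchSketch(void** map, int length, void* native_context) {
  for (int i = 0; i + 2 < length; i += 3) {      // kEntryLength == 3
    if (map[i] == native_context) return i + 1;  // slot holding the code
  }
  return -1;  // no entry for this context
}
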
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -5295,6 +5618,10 @@ class SharedFunctionInfo: public HeapObject {
// IsInobjectSlackTrackingInProgress is false after this call.
void CompleteInobjectSlackTracking();
+ // Invoked before pointers in SharedFunctionInfo are being marked.
+ // Also clears the optimized code map.
+ inline void BeforeVisitingPointers();
+
// Clears the initial_map before the GC marking phase to ensure the reference
// is weak. IsInobjectSlackTrackingInProgress is false after this call.
void DetachInitialMap();
@@ -5380,8 +5707,8 @@ class SharedFunctionInfo: public HeapObject {
// A counter used to determine when to stress the deoptimizer with a
// deopt.
- inline int deopt_counter();
- inline void set_deopt_counter(int counter);
+ inline int stress_deopt_counter();
+ inline void set_stress_deopt_counter(int counter);
inline int profiler_ticks();
@@ -5407,6 +5734,12 @@ class SharedFunctionInfo: public HeapObject {
// when doing GC if we expect that the function will no longer be used.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
+ // Indicates if this function can be lazy compiled without a context.
+ // This is used to determine if we can force compilation without reaching
+ // the function through program execution but through other means (e.g. heap
+ // iteration by the debugger).
+ DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
+
// Indicates how many full GCs this function has survived with assigned
// code object. Used to determine when it is relatively safe to flush
// this code object and replace it with lazy compilation stub.
@@ -5474,6 +5807,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that the function cannot be inlined.
DECL_BOOLEAN_ACCESSORS(dont_inline)
+ // Indicates that code for this function cannot be cached.
+ DECL_BOOLEAN_ACCESSORS(dont_cache)
+
// Indicates whether or not the code in the shared function support
// deoptimization.
inline bool has_deoptimization_support();
@@ -5483,12 +5819,12 @@ class SharedFunctionInfo: public HeapObject {
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
- void DisableOptimization();
+ void DisableOptimization(const char* reason);
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
// disabled).
- bool VerifyBailoutId(int id);
+ bool VerifyBailoutId(BailoutId id);
// Check whether an inlined constructor can be generated with the given
// prototype.
@@ -5512,9 +5848,26 @@ class SharedFunctionInfo: public HeapObject {
bool HasSourceCode();
Handle<Object> GetSourceCode();
+ // Number of times the function was optimized.
inline int opt_count();
inline void set_opt_count(int opt_count);
+ // Number of times the function was deoptimized.
+ inline void set_deopt_count(int value);
+ inline int deopt_count();
+ inline void increment_deopt_count();
+
+ // Number of times we tried to re-enable optimization after it
+ // was disabled due to a high number of deoptimizations.
+ inline void set_opt_reenable_tries(int value);
+ inline int opt_reenable_tries();
+
+ inline void TryReenableOptimization();
+
+ // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
+ inline void set_counters(int value);
+ inline int counters();
+
// Source size of this function.
int SourceSize();
@@ -5533,21 +5886,16 @@ class SharedFunctionInfo: public HeapObject {
}
void SharedFunctionInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void SharedFunctionInfoVerify();
-#endif
+ DECLARE_VERIFIER(SharedFunctionInfo)
void ResetForNewContext(int new_ic_age);
- // Helpers to compile the shared code. Returns true on success, false on
- // failure (e.g., stack overflow during compilation).
- static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
- ClearExceptionFlag flag);
+ // Helper to compile the shared code. Returns true on success, false on
+ // failure (e.g., stack overflow during compilation). This is only used by
+ // the debugger; it is not possible to compile without a context otherwise.
static bool CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
- void SharedFunctionInfoIterateBody(ObjectVisitor* v);
-
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -5558,7 +5906,8 @@ class SharedFunctionInfo: public HeapObject {
// Pointer fields.
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
+ static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
+ static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
kConstructStubOffset + kPointerSize;
@@ -5571,13 +5920,14 @@ class SharedFunctionInfo: public HeapObject {
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
- // ic_age is a Smi field. It could be grouped with another Smi field into a
- // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
- static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
+ // ast_node_count is a Smi field. It could be grouped with another Smi field
+ // into a PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+ static const int kAstNodeCountOffset =
+ kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5595,12 +5945,11 @@ class SharedFunctionInfo: public HeapObject {
kCompilerHintsOffset + kPointerSize;
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kPointerSize;
- static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
- static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
-
+ static const int kCountersOffset = kOptCountOffset + kPointerSize;
+ static const int kStressDeoptCounterOffset = kCountersOffset + kPointerSize;
// Total size.
- static const int kSize = kDeoptCounterOffset + kPointerSize;
+ static const int kSize = kStressDeoptCounterOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5612,7 +5961,7 @@ class SharedFunctionInfo: public HeapObject {
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kICAgeOffset + kPointerSize;
+ kAstNodeCountOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -5636,11 +5985,11 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kIntSize;
- static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
- static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
+ static const int kCountersOffset = kOptCountOffset + kIntSize;
+ static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize;
// Total size.
- static const int kSize = kDeoptCounterOffset + kIntSize;
+ static const int kSize = kStressDeoptCounterOffset + kIntSize;
#endif
@@ -5676,6 +6025,7 @@ class SharedFunctionInfo: public HeapObject {
enum CompilerHints {
kHasOnlySimpleThisPropertyAssignments,
kAllowLazyCompilation,
+ kAllowLazyCompilationWithoutContext,
kLiveObjectsMayExist,
kCodeAgeShift,
kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
@@ -5691,9 +6041,14 @@ class SharedFunctionInfo: public HeapObject {
kIsFunction,
kDontOptimize,
kDontInline,
+ kDontCache,
kCompilerHintsCount // Pseudo entry
};
+ class DeoptCountBits: public BitField<int, 0, 4> {};
+ class OptReenableTriesBits: public BitField<int, 4, 18> {};
+ class ICAgeBits: public BitField<int, 22, 8> {};
+
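So the counters Smi packs deopt_count, opt_reenable_tries and ic_age into 4 + 18 + 8 = 30 bits. A sketch of reading and bumping the deopt count inside that packing (raw mask arithmetic, standing in for the BitField accessors):

#include <stdint.h>

// Sketch: counters packs deopt_count (bits 0-3), opt_reenable_tries
// (bits 4-21) and ic_age (bits 22-29), per the BitFields above.
int DeoptCount(uint32_t counters) { return counters & 0xF; }

uint32_t IncrementDeoptCount(uint32_t counters) {
  uint32_t count = ((counters & 0xF) + 1) & 0xF;  // wraps in 4 bits
  return (counters & ~0xFu) | count;
}
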
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
@@ -5753,6 +6108,9 @@ class JSModule: public JSObject {
// [context]: the context holding the module's locals, or undefined if none.
DECL_ACCESSORS(context, Object)
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, ScopeInfo)
+
// Casting.
static inline JSModule* cast(Object* obj);
@@ -5763,13 +6121,12 @@ class JSModule: public JSObject {
}
void JSModulePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSModuleVerify();
-#endif
+ DECLARE_VERIFIER(JSModule)
// Layout description.
static const int kContextOffset = JSObject::kHeaderSize;
- static const int kSize = kContextOffset + kPointerSize;
+ static const int kScopeInfoOffset = kContextOffset + kPointerSize;
+ static const int kSize = kScopeInfoOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
@@ -5818,18 +6175,26 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
+ void MarkForParallelRecompilation();
// Helpers to compile this function. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
+ static bool EnsureCompiled(Handle<JSFunction> function,
+ ClearExceptionFlag flag);
static bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function,
- int osr_ast_id,
+ BailoutId osr_ast_id,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
+ inline bool IsMarkedForParallelRecompilation();
+
+ // Tells whether or not the function is on the parallel
+ // recompilation queue.
+ inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -5859,8 +6224,6 @@ class JSFunction: public JSObject {
// The initial map for an object created by this constructor.
inline Map* initial_map();
inline void set_initial_map(Map* value);
- MUST_USE_RESULT inline MaybeObject* set_initial_map_and_cache_transitions(
- Map* value);
inline bool has_initial_map();
// Get and set the prototype property on a JSFunction. If the
@@ -5876,7 +6239,7 @@ class JSFunction: public JSObject {
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
- Object* RemovePrototype();
+ void RemovePrototype();
inline bool should_have_prototype();
// Accessor for this function's initial map's [[class]]
@@ -5888,7 +6251,7 @@ class JSFunction: public JSObject {
// Instances created afterwards will have a map whose [[class]] is
// set to 'value', but there are no guarantees on instances created
// before.
- Object* SetInstanceClassName(String* name);
+ void SetInstanceClassName(String* name);
// Returns whether this function has been compiled to native code yet.
inline bool is_compiled();
@@ -5917,15 +6280,13 @@ class JSFunction: public JSObject {
}
void JSFunctionPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSFunctionVerify();
-#endif
+ DECLARE_VERIFIER(JSFunction)
// Returns the number of allocated literals.
inline int NumberOfLiterals();
- // Retrieve the global context from a function's literal array.
- static Context* GlobalContextFromLiterals(FixedArray* literals);
+ // Retrieve the native context from a function's literal array.
+ static Context* NativeContextFromLiterals(FixedArray* literals);
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
@@ -5942,7 +6303,7 @@ class JSFunction: public JSObject {
// Layout of the literals array.
static const int kLiteralsPrefixSize = 1;
- static const int kLiteralGlobalContextIndex = 0;
+ static const int kLiteralNativeContextIndex = 0;
// Layout of the bound-function binding array.
static const int kBoundFunctionIndex = 0;
@@ -5964,9 +6325,9 @@ class JSFunction: public JSObject {
class JSGlobalProxy : public JSObject {
public:
- // [context]: the owner global context of this global proxy object.
+ // [native_context]: the owner native context of this global proxy object.
// It is the null value if this object is not used by any context.
- DECL_ACCESSORS(context, Object)
+ DECL_ACCESSORS(native_context, Object)
// Casting.
static inline JSGlobalProxy* cast(Object* obj);
@@ -5978,13 +6339,11 @@ class JSGlobalProxy : public JSObject {
}
void JSGlobalProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSGlobalProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSGlobalProxy)
// Layout description.
- static const int kContextOffset = JSObject::kHeaderSize;
- static const int kSize = kContextOffset + kPointerSize;
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kSize = kNativeContextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
@@ -6001,7 +6360,10 @@ class GlobalObject: public JSObject {
// [builtins]: the object holding the runtime routines written in JS.
DECL_ACCESSORS(builtins, JSBuiltinsObject)
- // [global context]: the global context corresponding to this global object.
+ // [native context]: the natives corresponding to this global object.
+ DECL_ACCESSORS(native_context, Context)
+
+ // [global context]: the most recent (i.e. innermost) global context.
DECL_ACCESSORS(global_context, Context)
// [global receiver]: the global receiver object of the context
@@ -6032,7 +6394,8 @@ class GlobalObject: public JSObject {
// Layout description.
static const int kBuiltinsOffset = JSObject::kHeaderSize;
- static const int kGlobalContextOffset = kBuiltinsOffset + kPointerSize;
+ static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
+ static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize;
static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
@@ -6054,9 +6417,7 @@ class JSGlobalObject: public GlobalObject {
}
void JSGlobalObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSGlobalObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
static const int kSize = GlobalObject::kHeaderSize;
@@ -6088,9 +6449,7 @@ class JSBuiltinsObject: public GlobalObject {
}
void JSBuiltinsObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSBuiltinsObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSBuiltinsObject)
// Layout description. The size of the builtins object includes
// room for two pointers per runtime routine written in JavaScript
@@ -6131,9 +6490,7 @@ class JSValue: public JSObject {
}
void JSValuePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSValueVerify();
-#endif
+ DECLARE_VERIFIER(JSValue)
// Layout description.
static const int kValueOffset = JSObject::kHeaderSize;
@@ -6187,9 +6544,8 @@ class JSDate: public JSObject {
}
void JSDatePrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSDateVerify();
-#endif
+ DECLARE_VERIFIER(JSDate)
+
// The order is important. It must be kept in sync with date macros
// in macros.py.
enum FieldIndex {
@@ -6285,9 +6641,7 @@ class JSMessageObject: public JSObject {
}
void JSMessageObjectPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSMessageObjectVerify();
-#endif
+ DECLARE_VERIFIER(JSMessageObject)
// Layout description.
static const int kTypeOffset = JSObject::kHeaderSize;
@@ -6376,9 +6730,7 @@ class JSRegExp: public JSObject {
static inline JSRegExp* cast(Object* obj);
// Dispatched behavior.
-#ifdef DEBUG
- void JSRegExpVerify();
-#endif
+ DECLARE_VERIFIER(JSRegExp)
static const int kDataOffset = JSObject::kHeaderSize;
static const int kSize = kDataOffset + kPointerSize;
@@ -6478,13 +6830,15 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
HashTableKey*> {
public:
// Find cached value for a string key, otherwise return null.
- Object* Lookup(String* src);
+ Object* Lookup(String* src, Context* context);
Object* LookupEval(String* src,
Context* context,
LanguageMode language_mode,
int scope_position);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
- MUST_USE_RESULT MaybeObject* Put(String* src, Object* value);
+ MUST_USE_RESULT MaybeObject* Put(String* src,
+ Context* context,
+ Object* value);
MUST_USE_RESULT MaybeObject* PutEval(String* src,
Context* context,
SharedFunctionInfo* value,
@@ -6532,9 +6886,7 @@ class CodeCache: public Struct {
}
void CodeCachePrint(FILE* out);
#endif
-#ifdef DEBUG
- void CodeCacheVerify();
-#endif
+ DECLARE_VERIFIER(CodeCache)
static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
static const int kNormalTypeCacheOffset =
@@ -6623,9 +6975,7 @@ class PolymorphicCodeCache: public Struct {
}
void PolymorphicCodeCachePrint(FILE* out);
#endif
-#ifdef DEBUG
- void PolymorphicCodeCacheVerify();
-#endif
+ DECLARE_VERIFIER(PolymorphicCodeCache)
static const int kCacheOffset = HeapObject::kHeaderSize;
static const int kSize = kCacheOffset + kPointerSize;
@@ -6658,7 +7008,15 @@ class TypeFeedbackInfo: public Struct {
inline void set_ic_total_count(int count);
inline int ic_with_type_info_count();
- inline void set_ic_with_type_info_count(int count);
+ inline void change_ic_with_type_info_count(int count);
+
+ inline void initialize_storage();
+
+ inline void change_own_type_change_checksum();
+ inline int own_type_change_checksum();
+
+ inline void set_inlined_type_change_checksum(int checksum);
+ inline bool matches_inlined_type_change_checksum(int checksum);
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
@@ -6670,18 +7028,27 @@ class TypeFeedbackInfo: public Struct {
}
void TypeFeedbackInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void TypeFeedbackInfoVerify();
-#endif
+ DECLARE_VERIFIER(TypeFeedbackInfo)
- static const int kIcTotalCountOffset = HeapObject::kHeaderSize;
- static const int kIcWithTypeinfoCountOffset =
- kIcTotalCountOffset + kPointerSize;
- static const int kTypeFeedbackCellsOffset =
- kIcWithTypeinfoCountOffset + kPointerSize;
+ static const int kStorage1Offset = HeapObject::kHeaderSize;
+ static const int kStorage2Offset = kStorage1Offset + kPointerSize;
+ static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
private:
+ static const int kTypeChangeChecksumBits = 7;
+
+ class ICTotalCountField: public BitField<int, 0,
+ kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
+ class OwnTypeChangeChecksum: public BitField<int,
+ kSmiValueSize - kTypeChangeChecksumBits,
+ kTypeChangeChecksumBits> {}; // NOLINT
+ class ICsWithTypeInfoCountField: public BitField<int, 0,
+ kSmiValueSize - kTypeChangeChecksumBits> {}; // NOLINT
+ class InlinedTypeChangeChecksum: public BitField<int,
+ kSmiValueSize - kTypeChangeChecksumBits,
+ kTypeChangeChecksumBits> {}; // NOLINT
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
};
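
The new TypeFeedbackInfo layout packs an IC count and a 7-bit type-change checksum into each of two Smi-sized storage words, using BitField to carve up the bits. A compilable sketch of the same packing, with a simplified stand-in for V8's BitField template; kSmiValueSize is assumed to be 31 here, as on 32-bit targets:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for V8's BitField: a value occupying 'size' bits
    // starting at bit 'shift' of a 32-bit storage word. Callers must pass
    // values that fit in 'size' bits.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1u) << shift; }
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & mask();
      }
      static T decode(uint32_t storage) {
        return static_cast<T>((storage & mask()) >> shift);
      }
      static uint32_t update(uint32_t storage, T value) {
        return (storage & ~mask()) | encode(value);
      }
    };

    // Mirror of the layout above: a 7-bit checksum shares one storage word
    // with a counter in the remaining low bits.
    static const int kSmiValueSize = 31;
    static const int kTypeChangeChecksumBits = 7;
    typedef BitField<int, 0, kSmiValueSize - kTypeChangeChecksumBits>
        ICTotalCountField;
    typedef BitField<int, kSmiValueSize - kTypeChangeChecksumBits,
                     kTypeChangeChecksumBits> OwnTypeChangeChecksum;

    int main() {
      uint32_t storage = 0;
      storage = ICTotalCountField::update(storage, 12345);
      storage = OwnTypeChangeChecksum::update(storage, 42);
      assert(ICTotalCountField::decode(storage) == 12345);
      assert(OwnTypeChangeChecksum::decode(storage) == 42);
      return 0;
    }
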
@@ -6707,9 +7074,7 @@ class AliasedArgumentsEntry: public Struct {
}
void AliasedArgumentsEntryPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AliasedArgumentsEntryVerify();
-#endif
+ DECLARE_VERIFIER(AliasedArgumentsEntry)
static const int kAliasedContextSlot = HeapObject::kHeaderSize;
static const int kSize = kAliasedContextSlot + kPointerSize;
@@ -6752,10 +7117,6 @@ class StringHasher {
// index.
bool is_array_index() { return is_array_index_; }
- bool is_valid() { return is_valid_; }
-
- void invalidate() { is_valid_ = false; }
-
// Calculated hash value for a string consisting of 1 to
// String::kMaxArrayIndexSize digits with no leading zeros (except "0").
// The returned value is the decimal value represented by the string.
@@ -6774,13 +7135,33 @@ class StringHasher {
inline uint32_t GetHash();
+ // Reusable parts of the hashing algorithm.
+ INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c));
+ INLINE(static uint32_t GetHashCore(uint32_t running_hash));
+
int length_;
uint32_t raw_running_hash_;
uint32_t array_index_;
bool is_array_index_;
bool is_first_char_;
- bool is_valid_;
friend class TwoCharHashTableKey;
+
+ template <bool seq_ascii> friend class JsonParser;
+};
+
+
+class IncrementalAsciiStringHasher {
+ public:
+ explicit inline IncrementalAsciiStringHasher(uint32_t seed, char first_char);
+ inline void AddCharacter(uc32 c);
+ inline uint32_t GetHash();
+
+ private:
+ int length_;
+ uint32_t raw_running_hash_;
+ uint32_t array_index_;
+ bool is_array_index_;
+ char first_char_;
};
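
AddCharacterCore and GetHashCore factor the running-hash steps out of StringHasher so that JsonParser and IncrementalAsciiStringHasher can share them. A sketch of this add/finalize split using the classic Jenkins one-at-a-time steps; V8's real hasher also mixes in a per-isolate seed and tracks array-index candidates:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // One function advances the running hash by a character, another
    // finalizes it -- the same factoring as the INLINE core functions above.
    static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c) {
      running_hash += c;
      running_hash += running_hash << 10;
      running_hash ^= running_hash >> 6;
      return running_hash;
    }

    static uint32_t GetHashCore(uint32_t running_hash) {
      running_hash += running_hash << 3;
      running_hash ^= running_hash >> 11;
      running_hash += running_hash << 15;
      return running_hash;
    }

    int main() {
      const char* s = "hello";
      uint32_t hash = 0;  // V8 seeds this; 0 keeps the sketch simple.
      size_t n = strlen(s);
      for (size_t i = 0; i < n; ++i) {
        hash = AddCharacterCore(hash, static_cast<unsigned char>(s[i]));
      }
      std::printf("hash = 0x%08x\n", GetHashCore(hash));
      return 0;
    }
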
@@ -6974,9 +7355,6 @@ class String: public HeapObject {
bool IsAsciiEqualTo(Vector<const char> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
- bool SlowEqualsExternal(uc16 *string, int length);
- bool SlowEqualsExternal(char *string, int length);
-
// Return a UTF8 representation of the string. The string is null
// terminated but may optionally contain nulls. Length is returned
// in length_output if length_output is not a null pointer. The string
@@ -7043,9 +7421,8 @@ class String: public HeapObject {
char* ToAsciiArray();
#endif
-#ifdef DEBUG
- void StringVerify();
-#endif
+ DECLARE_VERIFIER(String)
+
inline bool IsFlat();
// Layout description.
@@ -7114,7 +7491,7 @@ class String: public HeapObject {
kIsNotArrayIndexMask | kHashNotComputedMask;
// Value of hash field containing computed hash equal to zero.
- static const int kZeroHash = kIsNotArrayIndexMask;
+ static const int kEmptyStringHash = kIsNotArrayIndexMask;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@@ -7149,32 +7526,47 @@ class String: public HeapObject {
int from,
int to);
- static inline bool IsAscii(const char* chars, int length) {
+ // The return value may point to the first aligned word containing the
+ // first non-ascii character, rather than directly to the non-ascii character.
+ // If the return value is >= the passed length, the entire string was ASCII.
+ static inline int NonAsciiStart(const char* chars, int length) {
+ const char* start = chars;
const char* limit = chars + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
ASSERT(kMaxAsciiCharCode == 0x7F);
const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars <= limit - sizeof(uintptr_t)) {
+ while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
- return false;
+ return static_cast<int>(chars - start);
}
chars += sizeof(uintptr_t);
}
#endif
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) return false;
+ if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) {
+ return static_cast<int>(chars - start);
+ }
++chars;
}
- return true;
+ return static_cast<int>(chars - start);
}
- static inline bool IsAscii(const uc16* chars, int length) {
+ static inline bool IsAscii(const char* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
+ }
+
+ static inline int NonAsciiStart(const uc16* chars, int length) {
const uc16* limit = chars + length;
+ const uc16* start = chars;
while (chars < limit) {
- if (*chars > kMaxAsciiCharCodeU) return false;
+ if (*chars > kMaxAsciiCharCodeU) return static_cast<int>(chars - start);
++chars;
}
- return true;
+ return static_cast<int>(chars - start);
+ }
+
+ static inline bool IsAscii(const uc16* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
}
protected:
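
NonAsciiStart's fast path above hinges on the mask kUintptrAllBitsSet / 0xFF * 0x80: dividing the all-ones word by 0xFF gives 0x0101...01, and multiplying by 0x80 sets the high bit of every byte, so a single AND per word detects any byte outside 7-bit ASCII. A standalone sketch of the trick, using memcpy instead of the unaligned cast the V8_HOST_CAN_READ_UNALIGNED branch relies on:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Returns true if every byte in 'chars' is 7-bit ASCII, scanning one
    // machine word at a time and finishing byte by byte for the tail.
    static bool IsAsciiWordwise(const char* chars, size_t length) {
      // 0xFF..FF / 0xFF = 0x0101..01; * 0x80 = 0x8080..80: the high bit
      // of every byte in the word.
      const uintptr_t kAllOnes = ~static_cast<uintptr_t>(0);
      const uintptr_t non_ascii_mask = kAllOnes / 0xFF * 0x80;
      const char* limit = chars + length;
      while (chars + sizeof(uintptr_t) <= limit) {
        uintptr_t word;
        memcpy(&word, chars, sizeof(word));  // memcpy avoids alignment UB.
        if (word & non_ascii_mask) return false;
        chars += sizeof(uintptr_t);
      }
      while (chars < limit) {
        if (static_cast<unsigned char>(*chars) > 0x7F) return false;
        ++chars;
      }
      return true;
    }

    int main() {
      assert(IsAsciiWordwise("plain ascii text", 16));
      assert(!IsAsciiWordwise("caf\xC3\xA9", 5));  // UTF-8 'e-acute' bytes.
      return 0;
    }
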
@@ -7287,6 +7679,8 @@ class SeqAsciiString: public SeqString {
unsigned* offset,
unsigned chars);
+ DECLARE_VERIFIER(SeqAsciiString)
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
};
@@ -7390,9 +7784,7 @@ class ConsString: public String {
typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
BodyDescriptor;
-#ifdef DEBUG
- void ConsStringVerify();
-#endif
+ DECLARE_VERIFIER(ConsString)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
@@ -7414,7 +7806,8 @@ class ConsString: public String {
class SlicedString: public String {
public:
inline String* parent();
- inline void set_parent(String* parent);
+ inline void set_parent(String* parent,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int offset();
inline void set_offset(int offset);
@@ -7443,9 +7836,7 @@ class SlicedString: public String {
kOffsetOffset + kPointerSize, kSize>
BodyDescriptor;
-#ifdef DEBUG
- void SlicedStringVerify();
-#endif
+ DECLARE_VERIFIER(SlicedString)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
@@ -7672,9 +8063,7 @@ class Oddball: public HeapObject {
static inline Oddball* cast(Object* obj);
// Dispatched behavior.
-#ifdef DEBUG
- void OddballVerify();
-#endif
+ DECLARE_VERIFIER(Oddball)
// Initialize the fields.
MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
@@ -7717,9 +8106,16 @@ class JSGlobalPropertyCell: public HeapObject {
// Casting.
static inline JSGlobalPropertyCell* cast(Object* obj);
-#ifdef DEBUG
- void JSGlobalPropertyCellVerify();
-#endif
+ static inline JSGlobalPropertyCell* FromValueAddress(Address value) {
+ return cast(FromAddress(value - kValueOffset));
+ }
+
+ inline Address ValueAddress() {
+ return address() + kValueOffset;
+ }
+
+ DECLARE_VERIFIER(JSGlobalPropertyCell)
+
#ifdef OBJECT_PRINT
inline void JSGlobalPropertyCellPrint() {
JSGlobalPropertyCellPrint(stdout);
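
FromValueAddress and ValueAddress above are inverses: subtracting kValueOffset from a value-slot address recovers the enclosing cell. A simplified round-trip sketch with plain pointers; the real code additionally goes through HeapObject tagging via FromAddress and cast:

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t Address;

    // Simplified cell: a header word followed by the value slot, mirroring
    // kValueOffset = HeapObject::kHeaderSize in the real layout.
    struct Cell {
      uintptr_t header;
      uintptr_t value;

      enum { kValueOffset = sizeof(uintptr_t) };

      Address ValueAddress() {
        return reinterpret_cast<Address>(this) + kValueOffset;
      }
      static Cell* FromValueAddress(Address value_address) {
        return reinterpret_cast<Cell*>(value_address - kValueOffset);
      }
    };

    int main() {
      Cell cell = { 0, 42 };
      // Round trip: cell -> value slot address -> cell.
      assert(Cell::FromValueAddress(cell.ValueAddress()) == &cell);
      assert(*reinterpret_cast<uintptr_t*>(cell.ValueAddress()) == 42);
      return 0;
    }
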
@@ -7763,23 +8159,28 @@ class JSProxy: public JSReceiver {
uint32_t index);
MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
+ JSReceiver* receiver,
String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetElementWithHandler(
+ JSReceiver* receiver,
uint32_t index,
Object* value,
StrictModeFlag strict_mode);
- // If the handler defines an accessor property, invoke its setter
- // (or throw if only a getter exists) and set *found to true. Otherwise false.
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+ // If the handler defines an accessor property with a setter, invoke it.
+ // If it defines an accessor property without a setter, or a data property
+ // that is read-only, throw. In all these cases set '*done' to true,
+ // otherwise set it to false.
+ MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
+ JSReceiver* receiver,
String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- bool* found);
+ bool* done);
MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
String* name,
@@ -7817,9 +8218,7 @@ class JSProxy: public JSReceiver {
}
void JSProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSProxy)
// Layout description. We add padding so that a proxy has the same
// size as a virgin JSObject. This is essential for becoming a JSObject
@@ -7860,9 +8259,7 @@ class JSFunctionProxy: public JSProxy {
}
void JSFunctionProxyPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSFunctionProxyVerify();
-#endif
+ DECLARE_VERIFIER(JSFunctionProxy)
// Layout description.
static const int kCallTrapOffset = JSProxy::kPaddingOffset;
@@ -7897,9 +8294,7 @@ class JSSet: public JSObject {
}
void JSSetPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSSetVerify();
-#endif
+ DECLARE_VERIFIER(JSSet)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kSize = kTableOffset + kPointerSize;
@@ -7924,9 +8319,7 @@ class JSMap: public JSObject {
}
void JSMapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSMapVerify();
-#endif
+ DECLARE_VERIFIER(JSMap)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kSize = kTableOffset + kPointerSize;
@@ -7954,9 +8347,7 @@ class JSWeakMap: public JSObject {
}
void JSWeakMapPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSWeakMapVerify();
-#endif
+ DECLARE_VERIFIER(JSWeakMap)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kNextOffset = kTableOffset + kPointerSize;
@@ -7991,9 +8382,7 @@ class Foreign: public HeapObject {
}
void ForeignPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ForeignVerify();
-#endif
+ DECLARE_VERIFIER(Foreign)
// Layout description.
@@ -8050,9 +8439,7 @@ class JSArray: public JSObject {
}
void JSArrayPrint(FILE* out);
#endif
-#ifdef DEBUG
- void JSArrayVerify();
-#endif
+ DECLARE_VERIFIER(JSArray)
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -8106,6 +8493,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(expected_receiver_type, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -8119,6 +8507,9 @@ class AccessorInfo: public Struct {
inline PropertyAttributes property_attributes();
inline void set_property_attributes(PropertyAttributes attributes);
+ // Checks whether the given receiver is compatible with this accessor.
+ inline bool IsCompatibleReceiver(Object* receiver);
+
static inline AccessorInfo* cast(Object* obj);
#ifdef OBJECT_PRINT
@@ -8127,16 +8518,15 @@ class AccessorInfo: public Struct {
}
void AccessorInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AccessorInfoVerify();
-#endif
+ DECLARE_VERIFIER(AccessorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
private:
// Bit positions in flag.
@@ -8162,7 +8552,19 @@ class AccessorPair: public Struct {
static inline AccessorPair* cast(Object* obj);
- MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
+ MUST_USE_RESULT MaybeObject* Copy();
+
+ Object* get(AccessorComponent component) {
+ return component == ACCESSOR_GETTER ? getter() : setter();
+ }
+
+ void set(AccessorComponent component, Object* value) {
+ if (component == ACCESSOR_GETTER) {
+ set_getter(value);
+ } else {
+ set_setter(value);
+ }
+ }
// Note: Returns undefined instead in case of a hole.
Object* GetComponent(AccessorComponent component);
@@ -8180,9 +8582,7 @@ class AccessorPair: public Struct {
#ifdef OBJECT_PRINT
void AccessorPairPrint(FILE* out = stdout);
#endif
-#ifdef DEBUG
- void AccessorPairVerify();
-#endif
+ DECLARE_VERIFIER(AccessorPair)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8216,9 +8616,7 @@ class AccessCheckInfo: public Struct {
}
void AccessCheckInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void AccessCheckInfoVerify();
-#endif
+ DECLARE_VERIFIER(AccessCheckInfo)
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
@@ -8248,9 +8646,7 @@ class InterceptorInfo: public Struct {
}
void InterceptorInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void InterceptorInfoVerify();
-#endif
+ DECLARE_VERIFIER(InterceptorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8279,9 +8675,7 @@ class CallHandlerInfo: public Struct {
}
void CallHandlerInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void CallHandlerInfoVerify();
-#endif
+ DECLARE_VERIFIER(CallHandlerInfo)
static const int kCallbackOffset = HeapObject::kHeaderSize;
static const int kDataOffset = kCallbackOffset + kPointerSize;
@@ -8297,9 +8691,7 @@ class TemplateInfo: public Struct {
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(property_list, Object)
-#ifdef DEBUG
- void TemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(TemplateInfo)
static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
@@ -8342,9 +8734,7 @@ class FunctionTemplateInfo: public TemplateInfo {
}
void FunctionTemplateInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void FunctionTemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(FunctionTemplateInfo)
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
@@ -8393,9 +8783,7 @@ class ObjectTemplateInfo: public TemplateInfo {
}
void ObjectTemplateInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void ObjectTemplateInfoVerify();
-#endif
+ DECLARE_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
static const int kInternalFieldCountOffset =
@@ -8419,9 +8807,7 @@ class SignatureInfo: public Struct {
}
void SignatureInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void SignatureInfoVerify();
-#endif
+ DECLARE_VERIFIER(SignatureInfo)
static const int kReceiverOffset = Struct::kHeaderSize;
static const int kArgsOffset = kReceiverOffset + kPointerSize;
@@ -8444,9 +8830,7 @@ class TypeSwitchInfo: public Struct {
}
void TypeSwitchInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void TypeSwitchInfoVerify();
-#endif
+ DECLARE_VERIFIER(TypeSwitchInfo)
static const int kTypesOffset = Struct::kHeaderSize;
static const int kSize = kTypesOffset + kPointerSize;
@@ -8496,9 +8880,7 @@ class DebugInfo: public Struct {
}
void DebugInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void DebugInfoVerify();
-#endif
+ DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
@@ -8554,9 +8936,7 @@ class BreakPointInfo: public Struct {
}
void BreakPointInfoPrint(FILE* out);
#endif
-#ifdef DEBUG
- void BreakPointInfoVerify();
-#endif
+ DECLARE_VERIFIER(BreakPointInfo)
static const int kCodePositionIndex = Struct::kHeaderSize;
static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
@@ -8574,6 +8954,7 @@ class BreakPointInfo: public Struct {
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_ACCESSORS
+#undef DECLARE_VERIFIER
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
V(kSymbolTable, "symbol_table", "(Symbols)") \
@@ -8638,14 +9019,16 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
+ // Visits the byte sequence in a function's prologue that contains information
+ // about the code's age.
+ virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
+
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
- virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
-
// Visits a contiguous array of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.cc b/src/3rdparty/v8/src/optimizing-compiler-thread.cc
new file mode 100644
index 0000000..83ff104
--- /dev/null
+++ b/src/3rdparty/v8/src/optimizing-compiler-thread.cc
@@ -0,0 +1,132 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// For Windows CE, Windows headers need to be included first as they define ASSERT
+#ifdef _WIN32_WCE
+# include "win32-headers.h"
+#endif
+
+#include "optimizing-compiler-thread.h"
+
+#include "v8.h"
+
+#include "hydrogen.h"
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+
+void OptimizingCompilerThread::Run() {
+#ifdef DEBUG
+ thread_id_ = ThreadId::Current().ToInteger();
+#endif
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+
+ int64_t epoch = 0;
+ if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+
+ while (true) {
+ input_queue_semaphore_->Wait();
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_total_ = OS::Ticks() - epoch;
+ }
+ return;
+ }
+
+ int64_t compiling_start = 0;
+ if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
+
+ Heap::RelocationLock relocation_lock(isolate_->heap());
+ OptimizingCompiler* optimizing_compiler = NULL;
+ input_queue_.Dequeue(&optimizing_compiler);
+ Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+
+ ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
+
+ OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ ASSERT(status != OptimizingCompiler::FAILED);
+    // Prevent an unused-variable warning in release mode.
+ USE(status);
+
+ output_queue_.Enqueue(optimizing_compiler);
+ isolate_->stack_guard()->RequestCodeReadyEvent();
+
+ if (FLAG_trace_parallel_recompilation) {
+ time_spent_compiling_ += OS::Ticks() - compiling_start;
+ }
+ }
+}
+
+
+void OptimizingCompilerThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ input_queue_semaphore_->Signal();
+ stop_semaphore_->Wait();
+
+ if (FLAG_trace_parallel_recompilation) {
+ double compile_time = static_cast<double>(time_spent_compiling_);
+ double total_time = static_cast<double>(time_spent_total_);
+ double percentage = (compile_time * 100) / total_time;
+ PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
+ }
+}
+
+
+void OptimizingCompilerThread::InstallOptimizedFunctions() {
+ HandleScope handle_scope(isolate_);
+ int functions_installed = 0;
+ while (!output_queue_.IsEmpty()) {
+ OptimizingCompiler* compiler = NULL;
+ output_queue_.Dequeue(&compiler);
+ Compiler::InstallOptimizedCode(compiler);
+ functions_installed++;
+ }
+ if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
+ PrintF(" ** Installed %d function(s).\n", functions_installed);
+ }
+}
+
+
+void OptimizingCompilerThread::QueueForOptimization(
+ OptimizingCompiler* optimizing_compiler) {
+ input_queue_.Enqueue(optimizing_compiler);
+ input_queue_semaphore_->Signal();
+}
+
+#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread() {
+ if (!FLAG_parallel_recompilation) return false;
+ return ThreadId::Current().ToInteger() == thread_id_;
+}
+#endif
+
+
+} } // namespace v8::internal
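
The file above is a single-producer/single-consumer loop: the execution thread enqueues jobs and signals a semaphore, and the worker blocks on that semaphore, re-checks an atomic stop flag, compiles, and pushes results onto an output queue that the execution thread drains. A self-contained sketch of the same control flow in standard C++11; a mutex/condvar semaphore stands in for OS::CreateSemaphore, std::atomic<bool> for the Acquire/Release stop flag, and plain int jobs for OptimizingCompiler*:

    #include <atomic>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Counting semaphore built from mutex + condvar (std::counting_semaphore
    // is C++20; the code above uses OS-level semaphores instead).
    class Semaphore {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    std::atomic<bool> stop_thread(false);
    Semaphore input_queue_semaphore, stop_semaphore;
    std::queue<int> input_queue;  // Stands in for UnboundQueue<...>.
    std::mutex input_queue_mutex;

    // Same shape as OptimizingCompilerThread::Run() above.
    void Run() {
      while (true) {
        input_queue_semaphore.Wait();  // Sleep until a job or a stop request.
        if (stop_thread.load()) {
          // Handshake observed by Stop(); jobs still queued are abandoned,
          // just as in the real Stop().
          stop_semaphore.Signal();
          return;
        }
        int job;
        {
          std::lock_guard<std::mutex> lock(input_queue_mutex);
          job = input_queue.front();
          input_queue.pop();
        }
        std::printf("compiled job %d\n", job);  // OptimizeGraph() stand-in.
      }
    }

    int main() {
      std::thread compiler_thread(Run);
      for (int i = 0; i < 3; ++i) {  // QueueForOptimization() analog.
        {
          std::lock_guard<std::mutex> lock(input_queue_mutex);
          input_queue.push(i);
        }
        input_queue_semaphore.Signal();
      }
      // Stop() analog: set the flag, wake the worker, await the handshake.
      stop_thread.store(true);
      input_queue_semaphore.Signal();
      stop_semaphore.Wait();
      compiler_thread.join();
      return 0;
    }
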
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.h b/src/3rdparty/v8/src/optimizing-compiler-thread.h
new file mode 100644
index 0000000..d562726
--- /dev/null
+++ b/src/3rdparty/v8/src/optimizing-compiler-thread.h
@@ -0,0 +1,101 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
+#define V8_OPTIMIZING_COMPILER_THREAD_H_
+
+#include "atomicops.h"
+#include "platform.h"
+#include "flags.h"
+#include "unbound-queue.h"
+
+namespace v8 {
+namespace internal {
+
+class HGraphBuilder;
+class OptimizingCompiler;
+
+class OptimizingCompilerThread : public Thread {
+ public:
+ explicit OptimizingCompilerThread(Isolate *isolate) :
+ Thread("OptimizingCompilerThread"),
+ isolate_(isolate),
+ stop_semaphore_(OS::CreateSemaphore(0)),
+ input_queue_semaphore_(OS::CreateSemaphore(0)),
+ time_spent_compiling_(0),
+ time_spent_total_(0) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ }
+
+ void Run();
+ void Stop();
+ void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void InstallOptimizedFunctions();
+
+ inline bool IsQueueAvailable() {
+ // We don't need a barrier since we have a data dependency right
+ // after.
+ Atomic32 current_length = NoBarrier_Load(&queue_length_);
+
+ // This can be queried only from the execution thread.
+ ASSERT(!IsOptimizerThread());
+    // Since only the execution thread increments queue_length_ and
+    // only one thread can run inside an Isolate at one time, a direct
+    // read doesn't introduce a race -- queue_length_ may be decreased
+    // in the meantime, but not increased.
+ return (current_length < FLAG_parallel_recompilation_queue_length);
+ }
+
+#ifdef DEBUG
+ bool IsOptimizerThread();
+#endif
+
+ ~OptimizingCompilerThread() {
+ delete input_queue_semaphore_;
+ delete stop_semaphore_;
+ }
+
+ private:
+ Isolate* isolate_;
+ Semaphore* stop_semaphore_;
+ Semaphore* input_queue_semaphore_;
+ UnboundQueue<OptimizingCompiler*> input_queue_;
+ UnboundQueue<OptimizingCompiler*> output_queue_;
+ volatile AtomicWord stop_thread_;
+ volatile Atomic32 queue_length_;
+ int64_t time_spent_compiling_;
+ int64_t time_spent_total_;
+
+#ifdef DEBUG
+ int thread_id_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_OPTIMIZING_COMPILER_THREAD_H_
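
IsQueueAvailable can use a plain load because queue_length_ has a single incrementing thread: the execution thread bumps it when queueing and only the compiler thread decrements it, so a stale read can only over-estimate the length. The check errs toward refusing admission and never over-fills. A sketch of that single-writer admission pattern using std::atomic rather than V8's atomicops:

    #include <atomic>
    #include <cassert>

    // Bounded-admission counter, mirroring IsQueueAvailable() above: only
    // the execution thread increments 'length_', only the worker decrements
    // it, so a relaxed read on the execution thread is conservative.
    class AdmissionCounter {
     public:
      explicit AdmissionCounter(int limit) : limit_(limit), length_(0) {}

      // Called on the execution thread only.
      bool IsQueueAvailable() const {
        return length_.load(std::memory_order_relaxed) < limit_;
      }
      void OnEnqueue() { length_.fetch_add(1, std::memory_order_relaxed); }

      // Called on the compiler thread when a job is dequeued.
      void OnDequeue() { length_.fetch_sub(1, std::memory_order_relaxed); }

     private:
      const int limit_;
      std::atomic<int> length_;
    };

    int main() {
      AdmissionCounter counter(2);
      assert(counter.IsQueueAvailable());
      counter.OnEnqueue();
      counter.OnEnqueue();
      assert(!counter.IsQueueAvailable());  // Full until the worker dequeues.
      counter.OnDequeue();
      assert(counter.IsQueueAvailable());
      return 0;
    }
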
diff --git a/src/3rdparty/v8/src/parser.cc b/src/3rdparty/v8/src/parser.cc
index b26a9f9..da4685f 100644
--- a/src/3rdparty/v8/src/parser.cc
+++ b/src/3rdparty/v8/src/parser.cc
@@ -86,8 +86,8 @@ class PositionStack {
};
-RegExpBuilder::RegExpBuilder()
- : zone_(Isolate::Current()->zone()),
+RegExpBuilder::RegExpBuilder(Zone* zone)
+ : zone_(zone),
pending_empty_(false),
characters_(NULL),
terms_(),
@@ -103,7 +103,7 @@ void RegExpBuilder::FlushCharacters() {
if (characters_ != NULL) {
RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
characters_ = NULL;
- text_.Add(atom);
+ text_.Add(atom, zone());
LAST(ADD_ATOM);
}
}
@@ -115,12 +115,12 @@ void RegExpBuilder::FlushText() {
if (num_text == 0) {
return;
} else if (num_text == 1) {
- terms_.Add(text_.last());
+ terms_.Add(text_.last(), zone());
} else {
- RegExpText* text = new(zone()) RegExpText();
+ RegExpText* text = new(zone()) RegExpText(zone());
for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text);
- terms_.Add(text);
+ text_.Get(i)->AppendToText(text, zone());
+ terms_.Add(text, zone());
}
text_.Clear();
}
@@ -129,9 +129,9 @@ void RegExpBuilder::FlushText() {
void RegExpBuilder::AddCharacter(uc16 c) {
pending_empty_ = false;
if (characters_ == NULL) {
- characters_ = new(zone()) ZoneList<uc16>(4);
+ characters_ = new(zone()) ZoneList<uc16>(4, zone());
}
- characters_->Add(c);
+ characters_->Add(c, zone());
LAST(ADD_CHAR);
}
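
The parser changes in this file thread an explicit Zone* through every allocation: new(zone()) T(...) placement-allocates into a bump arena, and ZoneList::Add now takes the zone so that list growth comes from the same arena instead of a global Isolate::Current() lookup. A toy sketch of the placement-new-into-arena mechanism; V8's Zone also chains segments and never fails this way:

    #include <cstddef>
    #include <cstdio>

    // Toy bump-pointer arena. Everything allocated from it is released at
    // once when the arena goes away; individual objects are never deleted,
    // which is why zone objects need trivial destructors.
    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~size_t(7);  // 8-byte alignment.
        if (offset_ + size > sizeof(buffer_)) return nullptr;  // Toy: no growth.
        void* result = buffer_ + offset_;
        offset_ += size;
        return result;
      }
     private:
      alignas(8) char buffer_[4096];
      size_t offset_ = 0;
    };

    // Enables 'new(zone) T(...)', as ZoneObject's operator new does in V8.
    void* operator new(size_t size, Zone* zone) { return zone->New(size); }
    void operator delete(void*, Zone*) {}  // Matching placement delete.

    struct RegExpAtom {
      explicit RegExpAtom(int length) : length(length) {}
      int length;
    };

    int main() {
      Zone zone;
      // The allocation pattern used throughout the hunks above.
      RegExpAtom* atom = new(&zone) RegExpAtom(3);
      std::printf("atom length = %d\n", atom->length);
      return 0;  // No delete: the whole zone dies here.
    }
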
@@ -148,10 +148,10 @@ void RegExpBuilder::AddAtom(RegExpTree* term) {
}
if (term->IsTextElement()) {
FlushCharacters();
- text_.Add(term);
+ text_.Add(term, zone());
} else {
FlushText();
- terms_.Add(term);
+ terms_.Add(term, zone());
}
LAST(ADD_ATOM);
}
@@ -159,7 +159,7 @@ void RegExpBuilder::AddAtom(RegExpTree* term) {
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
- terms_.Add(assert);
+ terms_.Add(assert, zone());
LAST(ADD_ASSERT);
}
@@ -178,9 +178,9 @@ void RegExpBuilder::FlushTerms() {
} else if (num_terms == 1) {
alternative = terms_.last();
} else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList());
+ alternative = new(zone()) RegExpAlternative(terms_.GetList(zone()));
}
- alternatives_.Add(alternative);
+ alternatives_.Add(alternative, zone());
terms_.Clear();
LAST(ADD_NONE);
}
@@ -195,7 +195,7 @@ RegExpTree* RegExpBuilder::ToRegExp() {
if (num_alternatives == 1) {
return alternatives_.last();
}
- return new(zone()) RegExpDisjunction(alternatives_.GetList());
+ return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
}
@@ -214,7 +214,7 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
int num_chars = char_vector.length();
if (num_chars > 1) {
Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix));
+ text_.Add(new(zone()) RegExpAtom(prefix), zone());
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
characters_ = NULL;
@@ -233,7 +233,7 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
if (min == 0) {
return;
}
- terms_.Add(atom);
+ terms_.Add(atom, zone());
return;
}
} else {
@@ -241,7 +241,7 @@ void RegExpBuilder::AddQuantifierToAtom(int min,
UNREACHABLE();
return;
}
- terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
+ terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom), zone());
LAST(ADD_TERM);
}
@@ -270,7 +270,7 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
if (symbol_cache_.length() <= symbol_id) {
// Increase length to index + 1.
symbol_cache_.AddBlock(Handle<String>::null(),
- symbol_id + 1 - symbol_cache_.length());
+ symbol_id + 1 - symbol_cache_.length(), zone());
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
@@ -408,7 +408,7 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
Scope* Parser::NewScope(Scope* parent, ScopeType type) {
- Scope* result = new(zone()) Scope(parent, type);
+ Scope* result = new(zone()) Scope(parent, type, zone());
result->Initialize();
return result;
}
@@ -493,10 +493,10 @@ Parser::FunctionState::FunctionState(Parser* parser,
outer_function_state_(parser->current_function_state_),
outer_scope_(parser->top_scope_),
saved_ast_node_id_(isolate->ast_node_id()),
- factory_(isolate) {
+ factory_(isolate, parser->zone()) {
parser->top_scope_ = scope;
parser->current_function_state_ = this;
- isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
}
@@ -532,13 +532,13 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
-Parser::Parser(Handle<Script> script,
+Parser::Parser(CompilationInfo* info,
int parser_flags,
v8::Extension* extension,
ScriptDataImpl* pre_data)
- : isolate_(script->GetIsolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
- script_(script),
+ : isolate_(info->isolate()),
+ symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
+ script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
@@ -551,7 +551,10 @@ Parser::Parser(Handle<Script> script,
allow_lazy_((parser_flags & kAllowLazy) != 0),
allow_modules_((parser_flags & kAllowModules) != 0),
stack_overflow_(false),
- parenthesized_function_(false) {
+ parenthesized_function_(false),
+ zone_(info->zone()),
+ info_(info) {
+ ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
scanner().SetHarmonyScoping(true);
@@ -562,16 +565,17 @@ Parser::Parser(Handle<Script> script,
}
-FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
- ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
-
+FunctionLiteral* Parser::ParseProgram() {
+ ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- fni_ = new(zone()) FuncNameInferrer(isolate());
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of the branch block.
// The last line of the blocks can't be moved outside, even though they're
@@ -579,12 +583,27 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info, source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream);
- return DoParseProgram(info, source, &zone_scope);
+ result = DoParseProgram(info(), source, &zone_scope);
+ }
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ if (info()->is_eval()) {
+ PrintF("[parsing eval");
+ } else if (info()->script()->name()->IsString()) {
+ String* name = String::cast(info()->script()->name());
+ SmartArrayPointer<char> name_chars = name->ToCString();
+ PrintF("[parsing script: %s", *name_chars);
+ } else {
+ PrintF("[parsing script");
+ }
+ PrintF(" - took %0.3f ms]\n", ms);
}
+ return result;
}
@@ -596,34 +615,37 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (pre_data_ != NULL) pre_data_->Initialize();
// Compute the parsing mode.
- mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
+ Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL) mode = PARSE_EAGERLY;
+ ParsingModeScope parsing_mode(this, mode);
Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
+ if (!info->context().is_null()) {
+ scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
+ }
if (info->is_eval()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- if (!info->is_global() && (shared.is_null() || shared->is_function())) {
- scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
- }
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
}
+ } else if (info->is_global()) {
+ scope = NewScope(scope, GLOBAL_SCOPE);
}
scope->set_start_position(0);
scope->set_end_position(source->length());
- FunctionState function_state(this, scope, isolate());
+
+ FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
top_scope_->SetLanguageMode(info->language_mode());
if (info->is_qml_mode()) {
scope->EnableQmlModeFlag();
}
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
+ ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_loc = scanner().location().beg_pos;
- ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
+ ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
if (ok && !top_scope_->is_classic_mode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
@@ -645,7 +667,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::kGlobalOrEval);
+ FunctionLiteral::kGlobalOrEval,
+ FunctionLiteral::kNotParenthesized);
result->set_ast_properties(factory()->visitor()->ast_properties());
} else if (stack_overflow_) {
isolate()->StackOverflow();
@@ -662,45 +685,51 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
- ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+FunctionLiteral* Parser::ParseLazy() {
+ ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
+ int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ Handle<SharedFunctionInfo> shared_info = info()->shared_info();
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
// Initialize parser state.
source->TryFlatten();
+ FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUtf16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
- return result;
+ result = ParseLazy(&stream, &zone_scope);
}
+
+ if (FLAG_trace_parse && result != NULL) {
+ double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
+ PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+ }
+ return result;
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
- Utf16CharacterStream* source,
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
ZoneScope* zone_scope) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
+ Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
- fni_ = new(zone()) FuncNameInferrer(isolate());
+ fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
fni_->PushEnclosingName(name);
- mode_ = PARSE_EAGERLY;
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
// Place holder for the result.
FunctionLiteral* result = NULL;
@@ -708,15 +737,16 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
{
// Parse the function literal.
Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
- info->SetGlobalScope(scope);
- if (!info->closure().is_null()) {
- scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
+ info()->SetGlobalScope(scope);
+ if (!info()->closure().is_null()) {
+ scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
+ zone());
}
FunctionState function_state(this, scope, isolate());
- ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+ ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info->is_extended_mode());
- ASSERT(info->language_mode() == shared_info->language_mode());
+ info()->is_extended_mode());
+ ASSERT(info()->language_mode() == shared_info->language_mode());
scope->SetLanguageMode(shared_info->language_mode());
if (shared_info->qml_mode()) {
top_scope_->EnableQmlModeFlag();
@@ -808,152 +838,25 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
}
-// Base class containing common code for the different finder classes used by
-// the parser.
-class ParserFinder {
- protected:
- ParserFinder() {}
- static Assignment* AsAssignment(Statement* stat) {
- if (stat == NULL) return NULL;
- ExpressionStatement* exp_stat = stat->AsExpressionStatement();
- if (exp_stat == NULL) return NULL;
- return exp_stat->expression()->AsAssignment();
- }
-};
-
-
-// An InitializationBlockFinder finds and marks sequences of statements of the
-// form expr.a = ...; expr.b = ...; etc.
-class InitializationBlockFinder : public ParserFinder {
- public:
- // We find and mark the initialization blocks in top level
- // non-looping code only. This is because the optimization prevents
- // reuse of the map transitions, so it should be used only for code
- // that will only be run once.
- InitializationBlockFinder(Scope* top_scope, Target* target)
- : enabled_(top_scope->DeclarationScope()->is_global_scope() &&
- !IsLoopTarget(target)),
- first_in_block_(NULL),
- last_in_block_(NULL),
- block_size_(0) {}
-
- ~InitializationBlockFinder() {
- if (!enabled_) return;
- if (InBlock()) EndBlock();
- }
-
- void Update(Statement* stat) {
- if (!enabled_) return;
- Assignment* assignment = AsAssignment(stat);
- if (InBlock()) {
- if (BlockContinues(assignment)) {
- UpdateBlock(assignment);
- } else {
- EndBlock();
- }
- }
- if (!InBlock() && (assignment != NULL) &&
- (assignment->op() == Token::ASSIGN)) {
- StartBlock(assignment);
- }
- }
-
- private:
- // The minimum number of contiguous assignment that will
- // be treated as an initialization block. Benchmarks show that
- // the overhead exceeds the savings below this limit.
- static const int kMinInitializationBlock = 3;
-
- static bool IsLoopTarget(Target* target) {
- while (target != NULL) {
- if (target->node()->AsIterationStatement() != NULL) return true;
- target = target->previous();
- }
- return false;
- }
-
- // Returns true if the expressions appear to denote the same object.
- // In the context of initialization blocks, we only consider expressions
- // of the form 'expr.x' or expr["x"].
- static bool SameObject(Expression* e1, Expression* e2) {
- VariableProxy* v1 = e1->AsVariableProxy();
- VariableProxy* v2 = e2->AsVariableProxy();
- if (v1 != NULL && v2 != NULL) {
- return v1->name()->Equals(*v2->name());
- }
- Property* p1 = e1->AsProperty();
- Property* p2 = e2->AsProperty();
- if ((p1 == NULL) || (p2 == NULL)) return false;
- Literal* key1 = p1->key()->AsLiteral();
- Literal* key2 = p2->key()->AsLiteral();
- if ((key1 == NULL) || (key2 == NULL)) return false;
- if (!key1->handle()->IsString() || !key2->handle()->IsString()) {
- return false;
- }
- String* name1 = String::cast(*key1->handle());
- String* name2 = String::cast(*key2->handle());
- if (!name1->Equals(name2)) return false;
- return SameObject(p1->obj(), p2->obj());
- }
-
- // Returns true if the expressions appear to denote different properties
- // of the same object.
- static bool PropertyOfSameObject(Expression* e1, Expression* e2) {
- Property* p1 = e1->AsProperty();
- Property* p2 = e2->AsProperty();
- if ((p1 == NULL) || (p2 == NULL)) return false;
- return SameObject(p1->obj(), p2->obj());
- }
-
- bool BlockContinues(Assignment* assignment) {
- if ((assignment == NULL) || (first_in_block_ == NULL)) return false;
- if (assignment->op() != Token::ASSIGN) return false;
- return PropertyOfSameObject(first_in_block_->target(),
- assignment->target());
- }
-
- void StartBlock(Assignment* assignment) {
- first_in_block_ = assignment;
- last_in_block_ = assignment;
- block_size_ = 1;
- }
-
- void UpdateBlock(Assignment* assignment) {
- last_in_block_ = assignment;
- ++block_size_;
- }
-
- void EndBlock() {
- if (block_size_ >= kMinInitializationBlock) {
- first_in_block_->mark_block_start();
- last_in_block_->mark_block_end();
- }
- last_in_block_ = first_in_block_ = NULL;
- block_size_ = 0;
- }
-
- bool InBlock() { return first_in_block_ != NULL; }
-
- const bool enabled_;
- Assignment* first_in_block_;
- Assignment* last_in_block_;
- int block_size_;
-
- DISALLOW_COPY_AND_ASSIGN(InitializationBlockFinder);
-};
-
-
// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
// this.x = ...;, where x is a named property. It also determines whether a
// function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder {
public:
- explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
+ ThisNamedPropertyAssignmentFinder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
only_simple_this_property_assignments_(true),
- names_(0),
- assigned_arguments_(0),
- assigned_constants_(0) {
+ names_(0, zone),
+ assigned_arguments_(0, zone),
+ assigned_constants_(0, zone),
+ zone_(zone) {
+ }
+
+ static Assignment* AsAssignment(Statement* stat) {
+ if (stat == NULL) return NULL;
+ ExpressionStatement* exp_stat = stat->AsExpressionStatement();
+ if (exp_stat == NULL) return NULL;
+ return exp_stat->expression()->AsAssignment();
}
void Update(Scope* scope, Statement* stat) {
@@ -1062,9 +965,9 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
return;
}
}
- names_.Add(name);
- assigned_arguments_.Add(index);
- assigned_constants_.Add(isolate_->factory()->undefined_value());
+ names_.Add(name, zone());
+ assigned_arguments_.Add(index, zone());
+ assigned_constants_.Add(isolate_->factory()->undefined_value(), zone());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
@@ -1076,9 +979,9 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
return;
}
}
- names_.Add(name);
- assigned_arguments_.Add(-1);
- assigned_constants_.Add(value);
+ names_.Add(name, zone());
+ assigned_arguments_.Add(-1, zone());
+ assigned_constants_.Add(value, zone());
}
void AssignmentFromSomethingElse() {
@@ -1090,23 +993,27 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
if (names_.capacity() == 0) {
ASSERT(assigned_arguments_.capacity() == 0);
ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4);
- assigned_arguments_.Initialize(4);
- assigned_constants_.Initialize(4);
+ names_.Initialize(4, zone());
+ assigned_arguments_.Initialize(4, zone());
+ assigned_constants_.Initialize(4, zone());
}
}
+ Zone* zone() const { return zone_; }
+
Isolate* isolate_;
bool only_simple_this_property_assignments_;
ZoneStringList names_;
ZoneList<int> assigned_arguments_;
ZoneObjectList assigned_constants_;
+ Zone* zone_;
};
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
+ bool is_global,
bool* ok) {
// SourceElements ::
// (ModuleElement)* <end_token>
@@ -1118,8 +1025,8 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
TargetScope scope(&this->target_stack_);
ASSERT(processor != NULL);
- InitializationBlockFinder block_finder(top_scope_, target_stack_);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
+ ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate(),
+ zone());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1128,7 +1035,12 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
Scanner::Location token_loc = scanner().peek_location();
- Statement* stat = ParseModuleElement(NULL, CHECK_OK);
+ Statement* stat;
+ if (is_global && !is_eval) {
+ stat = ParseModuleElement(NULL, CHECK_OK);
+ } else {
+ stat = ParseBlockElement(NULL, CHECK_OK);
+ }
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
@@ -1172,12 +1084,11 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
}
- block_finder.Update(stat);
// Find and mark all assignments to named properties in this (this.x =)
if (top_scope_->is_function_scope()) {
this_property_assignment_finder.Update(top_scope_, stat);
}
- processor->Add(stat);
+ processor->Add(stat, zone());
}
// Propagate the collected information on this property assignments.
@@ -1243,12 +1154,10 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
}
-Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// ModuleDeclaration:
// 'module' Identifier Module
- // Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -1272,10 +1181,11 @@ Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
}
#endif
- // TODO(rossberg): Add initialization statement to block.
-
- if (names) names->Add(name);
- return block;
+ if (names) names->Add(name, zone());
+ if (module->body() == NULL)
+ return factory()->NewEmptyStatement();
+ else
+ return module->body();
}
@@ -1323,16 +1233,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
{
BlockState block_state(this, scope);
- TargetCollector collector;
+ TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
- InitializationBlockFinder block_finder(top_scope_, target_stack_);
while (peek() != Token::RBRACE) {
Statement* stat = ParseModuleElement(NULL, CHECK_OK);
if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat);
- block_finder.Update(stat);
+ body->AddStatement(stat, zone());
}
}
}
@@ -1341,16 +1249,23 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
scope->set_end_position(scanner().location().end_pos);
body->set_scope(scope);
- // Instance objects have to be created ahead of time (before code generation
- // linking them) because of potentially cyclic references between them.
- // We create them here, to avoid another pass over the AST.
+ // Check that all exports are bound.
Interface* interface = scope->interface();
+ for (Interface::Iterator it = interface->iterator();
+ !it.done(); it.Advance()) {
+ if (scope->LocalLookup(it.name()) == NULL) {
+ Handle<String> name(it.name());
+ ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
+ *ok = false;
+ return NULL;
+ }
+ }
+
interface->MakeModule(ok);
- ASSERT(ok);
- interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
- ASSERT(ok);
+ ASSERT(*ok);
interface->Freeze(ok);
- ASSERT(ok);
+ ASSERT(*ok);
return factory()->NewModuleLiteral(body, interface);
}
@@ -1368,7 +1283,7 @@ Module* Parser::ParseModulePath(bool* ok) {
PrintF("# Path .%s ", name->ToAsciiArray());
#endif
Module* member = factory()->NewModulePath(result, name);
- result->interface()->Add(name, member->interface(), ok);
+ result->interface()->Add(name, member->interface(), zone(), ok);
if (!*ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1399,7 +1314,8 @@ Module* Parser::ParseModuleVariable(bool* ok) {
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
VariableProxy* proxy = top_scope_->NewUnresolved(
- factory(), name, scanner().location().beg_pos, Interface::NewModule());
+ factory(), name, Interface::NewModule(zone()),
+ scanner().location().beg_pos);
return factory()->NewModuleVariable(proxy);
}
@@ -1420,10 +1336,12 @@ Module* Parser::ParseModuleUrl(bool* ok) {
Module* result = factory()->NewModuleUrl(symbol);
Interface* interface = result->interface();
- interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
- ASSERT(ok);
interface->Freeze(ok);
- ASSERT(ok);
+ ASSERT(*ok);
+ // Create dummy scope to avoid errors as long as the feature isn't finished.
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ interface->Unify(scope->interface(), zone(), ok);
+ ASSERT(*ok);
return result;
}
@@ -1448,14 +1366,14 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
// TODO(ES6): implement destructuring ImportSpecifiers
Expect(Token::IMPORT, CHECK_OK);
- ZoneStringList names(1);
+ ZoneStringList names(1, zone());
Handle<String> name = ParseIdentifierName(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
name = ParseIdentifierName(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
}
ExpectContextualKeyword("from", CHECK_OK);
@@ -1470,8 +1388,8 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Import %s ", names[i]->ToAsciiArray());
#endif
- Interface* interface = Interface::NewUnknown();
- module->interface()->Add(names[i], interface, ok);
+ Interface* interface = Interface::NewUnknown(zone());
+ module->interface()->Add(names[i], interface, zone(), ok);
if (!*ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1487,7 +1405,6 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
Declaration* declaration =
factory()->NewImportDeclaration(proxy, module, top_scope_);
Declare(declaration, true, CHECK_OK);
- // TODO(rossberg): Add initialization statement to block.
}
return block;
@@ -1506,17 +1423,17 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
Expect(Token::EXPORT, CHECK_OK);
Statement* result = NULL;
- ZoneStringList names(1);
+ ZoneStringList names(1, zone());
switch (peek()) {
case Token::IDENTIFIER: {
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsEqualTo(CStrVector("module"))) {
- names.Add(name);
+ names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
name = ParseIdentifier(CHECK_OK);
- names.Add(name);
+ names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
result = factory()->NewEmptyStatement();
@@ -1549,8 +1466,10 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Export %s ", names[i]->ToAsciiArray());
#endif
- Interface* inner = Interface::NewUnknown();
- interface->Add(names[i], inner, CHECK_OK);
+ Interface* inner = Interface::NewUnknown(zone());
+ interface->Add(names[i], inner, zone(), CHECK_OK);
+ if (!*ok)
+ return NULL;
VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
USE(proxy);
// TODO(rossberg): Rethink whether we actually need to store export
@@ -1683,7 +1602,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
if (statement) {
statement->set_statement_pos(statement_pos);
}
- if (result) result->AddStatement(statement);
+ if (result) result->AddStatement(statement, zone());
return result;
}
@@ -1726,7 +1645,7 @@ VariableProxy* Parser::NewUnresolved(
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
return DeclarationScope(mode)->NewUnresolved(
- factory(), name, scanner().location().beg_pos, interface);
+ factory(), name, interface, scanner().location().beg_pos);
}
@@ -1737,7 +1656,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
Scope* declaration_scope = DeclarationScope(mode);
Variable* var = NULL;
- // If a function scope exists, then we can statically declare this
+ // If a suitable scope exists, then we can statically declare this
// variable and also set its mode. In any case, a Declaration node
// will be added to the scope so that the declaration can be added
// to the corresponding activation frame at runtime if necessary.
@@ -1745,56 +1664,58 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
- // Also for block scoped let/const bindings the variable can be
- // statically declared.
if (declaration_scope->is_function_scope() ||
declaration_scope->is_strict_or_extended_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
- declaration->AsModuleDeclaration() != NULL) {
- // Declare the variable in the function scope.
- var = declaration_scope->LocalLookup(name);
+ declaration_scope->is_global_scope()) {
+ // Declare the variable in the declaration scope.
+ // For the global scope, we have to check for collisions with earlier
+ // (i.e., enclosing) global scopes, to maintain the illusion of a single
+ // global scope.
+ var = declaration_scope->is_global_scope()
+ ? declaration_scope->Lookup(name)
+ : declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
var = declaration_scope->DeclareLocal(
name, mode, declaration->initialization(), proxy->interface());
- } else {
+ } else if ((mode != VAR || var->mode() != VAR) &&
+ (!declaration_scope->is_global_scope() ||
+ IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode()))) {
// The name was declared in this scope before; check for conflicting
// re-declarations. We have a conflict if either of the declarations is
- // not a var. There is similar code in runtime.cc in the Declare
+ // not a var (in the global scope, we also have to ignore legacy const for
+ // compatibility). There is similar code in runtime.cc in the Declare
// functions. The function CheckNonConflictingScope checks for conflicting
// var and let bindings from different scopes whereas this is a check for
// conflicting declarations within the same scope. This check also covers
+ // the special case
//
// function () { let x; { var x; } }
//
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
- if ((mode != VAR) || (var->mode() != VAR)) {
- // We only have vars, consts and lets in declarations.
- ASSERT(var->mode() == VAR ||
- var->mode() == CONST ||
- var->mode() == CONST_HARMONY ||
- var->mode() == LET);
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
- // ES5 16 for a definition of early errors.
- SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
- const char* elms[2] = { "Variable", *c_string };
- Vector<const char*> args(elms, 2);
- ReportMessage("redeclaration", args);
- *ok = false;
- return;
- }
- const char* type = (var->mode() == VAR)
- ? "var" : var->is_const_mode() ? "const" : "let";
- Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
- Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
- type_string, name);
- declaration_scope->SetIllegalRedeclaration(expression);
+ ASSERT(IsDeclaredVariableMode(var->mode()));
+ if (is_extended_mode()) {
+ // In harmony mode we treat re-declarations as early errors. See
+ // ES5 16 for a definition of early errors.
+ SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
+ const char* elms[2] = { "Variable", *c_string };
+ Vector<const char*> args(elms, 2);
+ ReportMessage("redeclaration", args);
+ *ok = false;
+ return;
}
+ const char* type =
+ (var->mode() == VAR) ? "var" : var->is_const_mode() ? "const" : "let";
+ Handle<String> type_string =
+ isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
+ Expression* expression =
+ NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
+ type_string, name);
+ declaration_scope->SetIllegalRedeclaration(expression);
}
}
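
For reference, the conflict condition introduced above reduces to a small predicate. A minimal sketch, assuming VariableMode and IsLexicalVariableMode as declared in v8globals.h:

    // Do two declarations of the same name conflict within one scope?
    // Two plain vars never conflict. Outside the global scope, any other
    // combination does. In the global scope only lexical bindings
    // (LET / CONST_HARMONY) create a conflict, which is how legacy
    // var/const re-declarations stay legal there.
    static bool DeclarationsConflict(VariableMode existing,
                                     VariableMode incoming,
                                     bool in_global_scope) {
      if (existing == VAR && incoming == VAR) return false;
      if (!in_global_scope) return true;
      return IsLexicalVariableMode(existing) || IsLexicalVariableMode(incoming);
    }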
@@ -1816,8 +1737,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// Runtime::DeclareContextSlot() calls.
declaration_scope->AddDeclaration(declaration);
- if ((mode == CONST || mode == CONST_HARMONY) &&
- declaration_scope->is_global_scope()) {
+ if (mode == CONST && declaration_scope->is_global_scope()) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
@@ -1868,6 +1788,25 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// both access to the static and the dynamic context chain; the
// runtime needs to provide both.
if (resolve && var != NULL) {
+ if (declaration_scope->is_qml_mode()) {
+ Handle<GlobalObject> global = isolate_->global_object();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
+ // Get the context before the debugger was entered.
+ SaveContext *save = isolate_->save_context();
+ while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ save = save->prev();
+
+ global = Handle<GlobalObject>(save->context()->global_object());
+ }
+#endif
+
+ if (!global->HasProperty(*(proxy->name()))) {
+ var->set_is_qml_global(true);
+ }
+ }
+
proxy->BindTo(var);
if (FLAG_harmony_modules) {
@@ -1876,7 +1815,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Declare %s\n", var->name()->ToAsciiArray());
#endif
- proxy->interface()->Unify(var->interface(), &ok);
+ proxy->interface()->Unify(var->interface(), zone(), &ok);
if (!ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1942,7 +1881,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = NewUnresolved(name, VAR);
+ VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
Declare(declaration, true, CHECK_OK);
@@ -1968,14 +1907,17 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
- // scope, we treat is as such and introduce the function with it's
+ // scope, we treat it as such and introduce the function with its
// initial value upon entering the corresponding scope.
- VariableMode mode = is_extended_mode() ? LET : VAR;
- VariableProxy* proxy = NewUnresolved(name, mode);
+ // In extended mode, a function behaves as a lexical binding, except in the
+ // global scope.
+ VariableMode mode =
+ is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
Declare(declaration, true, CHECK_OK);
- if (names) names->Add(name);
+ if (names) names->Add(name, zone());
return factory()->NewEmptyStatement();
}
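
As an illustration (comments only, mirroring the example style the file already uses), the mode chosen above plays out like this in extended mode:

    //   { function f() {} }   // block scope: 'f' binds as LET and stays
    //                         // local to the block
    //   function g() {}       // global scope: still VAR, so 'g' remains
    //                         // a property of the global object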
@@ -1993,12 +1935,10 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
Block* result = factory()->NewBlock(labels, 16, false);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
- InitializationBlockFinder block_finder(top_scope_, target_stack_);
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
if (stat && !stat->IsEmpty()) {
- result->AddStatement(stat);
- block_finder.Update(stat);
+ result->AddStatement(stat, zone());
}
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2020,16 +1960,14 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
block_scope->set_start_position(scanner().location().beg_pos);
{ BlockState block_state(this, block_scope);
- TargetCollector collector;
+ TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
- InitializationBlockFinder block_finder(top_scope_, target_stack_);
while (peek() != Token::RBRACE) {
Statement* stat = ParseBlockElement(NULL, CHECK_OK);
if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat);
- block_finder.Update(stat);
+ body->AddStatement(stat, zone());
}
}
}
@@ -2203,7 +2141,9 @@ Block* Parser::ParseVariableDeclarations(
// For let/const declarations in harmony mode, we can also immediately
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
- VariableProxy* proxy = NewUnresolved(name, mode);
+ Interface* interface =
+ is_const ? Interface::NewConst() : Interface::NewValue();
+ VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, mode, top_scope_);
Declare(declaration, mode != VAR, CHECK_OK);
@@ -2214,7 +2154,7 @@ Block* Parser::ParseVariableDeclarations(
*ok = false;
return NULL;
}
- if (names) names->Add(name);
+ if (names) names->Add(name, zone());
// Parse initialization expression if present and/or needed. A
// declaration of the form:
@@ -2291,21 +2231,24 @@ Block* Parser::ParseVariableDeclarations(
// declaration statement has been executed. This is important in
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
- if (initialization_scope->is_global_scope()) {
+ if (initialization_scope->is_global_scope() &&
+ !IsLexicalVariableMode(mode)) {
// Compute the arguments for the runtime call.
- ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(3, zone());
// We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name));
+ arguments->Add(factory()->NewLiteral(name), zone());
CallRuntime* initialize;
if (is_const) {
- arguments->Add(value);
+ arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
int qml_mode = 0;
- if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
+ if (top_scope_->is_qml_mode()
+ && !Isolate::Current()->global_object()->HasProperty(*name))
qml_mode = 1;
- arguments->Add(factory()->NewNumberLiteral(qml_mode));
+ arguments->Add(factory()->NewNumberLiteral(qml_mode), zone());
// Construct the call to Runtime_InitializeConstGlobal
// and add it to the initialization statement block.
@@ -2319,19 +2262,20 @@ Block* Parser::ParseVariableDeclarations(
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode));
+ arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
int qml_mode = 0;
- if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
+ if (top_scope_->is_qml_mode()
+ && !Isolate::Current()->global_object()->HasProperty(*name))
qml_mode = 1;
- arguments->Add(factory()->NewNumberLiteral(qml_mode));
+ arguments->Add(factory()->NewNumberLiteral(qml_mode), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
// necessarily be stored in the global object in that case,
// which is why we need to generate a separate assignment node.
if (value != NULL && !inside_with()) {
- arguments->Add(value);
+ arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
}
@@ -2345,7 +2289,8 @@ Block* Parser::ParseVariableDeclarations(
arguments);
}
- block->AddStatement(factory()->NewExpressionStatement(initialize));
+ block->AddStatement(factory()->NewExpressionStatement(initialize),
+ zone());
} else if (needs_init) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -2359,7 +2304,8 @@ Block* Parser::ParseVariableDeclarations(
ASSERT(value != NULL);
Assignment* assignment =
factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment));
+ block->AddStatement(factory()->NewExpressionStatement(assignment),
+ zone());
value = NULL;
}
@@ -2371,10 +2317,11 @@ Block* Parser::ParseVariableDeclarations(
// if they are inside a 'with' statement - they may change a 'with' object
// property).
VariableProxy* proxy =
- initialization_scope->NewUnresolved(factory(), name);
+ initialization_scope->NewUnresolved(factory(), name, interface);
Assignment* assignment =
factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment));
+ block->AddStatement(factory()->NewExpressionStatement(assignment),
+ zone());
}
if (fni_ != NULL) fni_->Leave();
@@ -2428,8 +2375,10 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
*ok = false;
return NULL;
}
- if (labels == NULL) labels = new(zone()) ZoneStringList(4);
- labels->Add(label);
+ if (labels == NULL) {
+ labels = new(zone()) ZoneStringList(4, zone());
+ }
+ labels->Add(label, zone());
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
@@ -2641,12 +2590,13 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
}
Expect(Token::COLON, CHECK_OK);
int pos = scanner().location().beg_pos;
- ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5);
+ ZoneList<Statement*>* statements =
+ new(zone()) ZoneList<Statement*>(5, zone());
while (peek() != Token::CASE &&
peek() != Token::DEFAULT &&
peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
- statements->Add(stat);
+ statements->Add(stat, zone());
}
return new(zone()) CaseClause(isolate(), label, statements, pos);
@@ -2667,11 +2617,11 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
Expect(Token::RPAREN, CHECK_OK);
bool default_seen = false;
- ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4);
+ ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4, zone());
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause);
+ cases->Add(clause, zone());
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2712,7 +2662,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
- TargetCollector try_collector;
+ TargetCollector try_collector(zone());
Block* try_block;
{ Target target(&this->target_stack_, &try_collector);
@@ -2730,7 +2680,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// then we will need to collect escaping targets from the catch
// block. Since we don't know yet if there will be a finally block, we
// always collect the targets.
- TargetCollector catch_collector;
+ TargetCollector catch_collector(zone());
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
@@ -2785,7 +2735,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
index, try_block, catch_scope, catch_variable, catch_block);
statement->set_escaping_targets(try_collector.targets());
try_block = factory()->NewBlock(NULL, 1, false);
- try_block->AddStatement(statement);
+ try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2801,7 +2751,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
// Combine the jump targets of the try block and the possible catch block.
- try_collector.targets()->AddAll(*catch_collector.targets());
+ try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
result->set_escaping_targets(try_collector.targets());
@@ -2875,12 +2825,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
for_scope->set_start_position(scanner().location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
+ bool is_const = peek() == Token::CONST;
Handle<String> name;
Block* variable_statement =
ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
if (peek() == Token::IN && !name.is_null()) {
- VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
+ Interface* interface =
+ is_const ? Interface::NewConst() : Interface::NewValue();
ForInStatement* loop = factory()->NewForInStatement(labels);
Target target(&this->target_stack_, loop);
@@ -2888,11 +2840,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
+ VariableProxy* each =
+ top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
loop->Initialize(each, enumerable, body);
Block* result = factory()->NewBlock(NULL, 2, false);
- result->AddStatement(variable_statement);
- result->AddStatement(loop);
+ result->AddStatement(variable_statement, zone());
+ result->AddStatement(loop, zone());
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
@@ -2925,25 +2879,33 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// TODO(keuchel): Move the temporary variable to the block scope, after
// implementing stack allocated block scoped variables.
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
+ Factory* heap_factory = isolate()->factory();
+ Handle<String> tempstr =
+ heap_factory->NewConsString(heap_factory->dot_for_symbol(), name);
+ Handle<String> tempname = heap_factory->LookupSymbol(tempstr);
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
ForInStatement* loop = factory()->NewForInStatement(labels);
Target target(&this->target_stack_, loop);
+ // The expression does not see the loop variable.
Expect(Token::IN, CHECK_OK);
+ top_scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
+ top_scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
+ VariableProxy* each =
+ top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block = factory()->NewBlock(NULL, 3, false);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment);
- body_block->AddStatement(variable_statement);
- body_block->AddStatement(assignment_statement);
- body_block->AddStatement(body);
+ body_block->AddStatement(variable_statement, zone());
+ body_block->AddStatement(assignment_statement, zone());
+ body_block->AddStatement(body, zone());
loop->Initialize(temp_proxy, enumerable, body_block);
top_scope_ = saved_scope;
for_scope->set_end_position(scanner().location().end_pos);
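
Spelled out in the comment style the file already uses for such examples, the rewrite built above is roughly:

    //   for (let x in e) b
    //
    // becomes, with a fresh '.for.x' temporary in the enclosing declaration
    // scope and with 'e' parsed outside the loop scope:
    //
    //   for (.for.x in e) {
    //     let x;         // the variable_statement parsed earlier
    //     x = .for.x;    // the assignment_statement built above
    //     b
    //   }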
@@ -3026,8 +2988,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// }
ASSERT(init != NULL);
Block* result = factory()->NewBlock(NULL, 2, false);
- result->AddStatement(init);
- result->AddStatement(loop);
+ result->AddStatement(init, zone());
+ result->AddStatement(loop, zone());
result->set_scope(for_scope);
if (loop) loop->Initialize(NULL, cond, next, body);
return result;
@@ -3409,6 +3371,12 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// should not point to the closing brace otherwise it will intersect
// with positions recorded for function literal and confuse debugger.
pos = scanner().peek_location().beg_pos;
+ // Also the trailing parentheses are a hint that the function will
+ // be called immediately. If we happen to have parsed a preceding
+ // function literal eagerly, we can also compile it eagerly.
+ if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ result->AsFunctionLiteral()->set_parenthesized();
+ }
}
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
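
The call shape this branch is sniffing for, as a one-line illustration (comments only, not from the patch):

    //   var f = (function () { /* large body */ })();
    //
    // The '(' before 'function' set parenthesized_function_ while the
    // literal was parsed; the argument list seen here confirms the
    // immediate call, so compiling the literal eagerly pays off.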
@@ -3470,7 +3438,7 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
if (!stack->is_empty()) {
int last = stack->pop();
result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0), last);
+ result, new(zone()) ZoneList<Expression*>(0, zone()), last);
}
return result;
}
@@ -3660,9 +3628,9 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Variable %s ", name->ToAsciiArray());
#endif
- Interface* interface = Interface::NewUnknown();
+ Interface* interface = Interface::NewUnknown(zone());
result = top_scope_->NewUnresolved(
- factory(), name, scanner().location().beg_pos, interface);
+ factory(), name, interface, scanner().location().beg_pos);
break;
}
@@ -3760,7 +3728,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4);
+ ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
@@ -3769,7 +3737,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
} else {
elem = ParseAssignmentExpression(true, CHECK_OK);
}
- values->Add(elem);
+ values->Add(elem, zone());
if (peek() != Token::RBRACK) {
Expect(Token::COMMA, CHECK_OK);
}
@@ -3783,10 +3751,12 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<FixedArray> object_literals =
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
- ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+ ElementsKind elements_kind = FAST_SMI_ELEMENTS;
bool has_only_undefined_values = true;
+ bool has_hole_values = false;
// Fill in the literals.
+ Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
for (int i = 0, n = values->length(); i < n; i++) {
@@ -3795,12 +3765,18 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
depth = m_literal->depth() + 1;
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
+ if (boilerplate_value->IsTheHole()) {
+ has_hole_values = true;
object_literals->set_the_hole(i);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
double_literals->set_the_hole(i);
}
+ } else if (boilerplate_value->IsUndefined()) {
is_simple = false;
+ object_literals->set(i, Smi::FromInt(0));
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ double_literals->set(i, 0);
+ }
} else {
// Examine each literal element, and adjust the ElementsKind if the
// literal element is not of a type that can be stored in the current
@@ -3810,7 +3786,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ultimately end up in FAST_ELEMENTS.
has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (elements_kind == FAST_SMI_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
// FAST_ELEMENTS is required.
if (!boilerplate_value->IsSmi()) {
@@ -3858,7 +3834,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
elements_kind != FAST_DOUBLE_ELEMENTS) {
- object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
+ object_literals->set_map(heap->fixed_cow_array_map());
}
Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
@@ -3870,6 +3846,10 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<FixedArray> literals =
isolate()->factory()->NewFixedArray(2, TENURED);
+ if (has_hole_values || !FLAG_packed_arrays) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
+
literals->set(0, Smi::FromInt(elements_kind));
literals->set(1, *element_values);
@@ -4126,7 +4106,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// )*[','] '}'
ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4);
+ new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
int number_of_boilerplate_properties = 0;
bool has_function = false;
@@ -4163,7 +4143,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
// Validate the property.
checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
+ properties->Add(property, zone());
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
if (fni_ != NULL) {
@@ -4231,7 +4211,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
// Validate the property
checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property);
+ properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
@@ -4290,12 +4270,12 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4);
+ ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LPAREN, CHECK_OK);
bool done = (peek() == Token::RPAREN);
while (!done) {
Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument);
+ result->Add(argument, zone());
if (result->length() > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_arguments",
Vector<const char*>::empty());
@@ -4341,6 +4321,7 @@ class SingletonLogger : public ParserRecorder {
int end,
const char* message,
const char* argument_opt) {
+ if (has_error_) return;
has_error_ = true;
start_ = start;
end_ = end;
@@ -4435,6 +4416,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
Handle<FixedArray> this_property_assignments;
FunctionLiteral::ParameterFlag duplicate_parameters =
FunctionLiteral::kNoDuplicateParameters;
+ FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
+ ? FunctionLiteral::kIsParenthesized
+ : FunctionLiteral::kNotParenthesized;
AstProperties ast_properties;
// Parse function body.
{ FunctionState function_state(this, scope, isolate());
@@ -4495,7 +4479,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
fvar = new(zone()) Variable(top_scope_,
function_name, fvar_mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized);
+ Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration =
factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
@@ -4506,7 +4490,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// The heuristics are:
// - It must not have been prohibited by the caller to Parse (some callers
// need a full AST).
- // - The outer scope must be trivial (only global variables in scope).
+ // - The outer scope must allow lazy compilation of inner functions.
// - The function mustn't be a function expression with an open parenthesis
// before; we consider that a hint that the function will be called
// immediately, and it would be a waste of time to make it lazily
@@ -4514,8 +4498,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// These are all things we can know at this point, without looking at the
// function itself.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->outer_scope()->is_global_scope() &&
- top_scope_->HasTrivialOuterContext() &&
+ top_scope_->AllowsLazyCompilation() &&
!parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
@@ -4584,18 +4567,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
}
if (!is_lazily_compiled) {
- body = new(zone()) ZoneList<Statement*>(8);
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+ body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy =
- top_scope_->NewUnresolved(factory(), function_name);
+ VariableProxy* fproxy = top_scope_->NewUnresolved(
+ factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
fproxy,
factory()->NewThisFunction(),
- RelocInfo::kNoPosition)));
+ RelocInfo::kNoPosition)),
+ zone());
}
- ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
+ ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -4673,7 +4658,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
num_parameters,
duplicate_parameters,
type,
- FunctionLiteral::kIsFunction);
+ FunctionLiteral::kIsFunction,
+ parenthesized);
function_literal->set_function_token_position(function_token_position);
function_literal->set_ast_properties(&ast_properties);
@@ -4745,6 +4731,13 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
return NULL;
}
+ // Check that the function is defined if it's an inline runtime call.
+ if (function == NULL && name->Get(0) == '_') {
+ ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ *ok = false;
+ return NULL;
+ }
+
// We have a valid intrinsics call or a call to a builtin.
return factory()->NewCallRuntime(name, function, args);
}
@@ -4992,7 +4985,7 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) {
// the break target to any TargetCollectors passed on the stack.
for (Target* t = target_stack_; t != stop; t = t->previous()) {
TargetCollector* collector = t->node()->AsTargetCollector();
- if (collector != NULL) collector->AddTarget(target);
+ if (collector != NULL) collector->AddTarget(target, zone());
}
}
@@ -5039,9 +5032,9 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
elements, FAST_ELEMENTS, TENURED);
- ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
- args->Add(factory()->NewLiteral(type));
- args->Add(factory()->NewLiteral(array));
+ ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewLiteral(type), zone());
+ args->Add(factory()->NewLiteral(array), zone());
CallRuntime* call_constructor =
factory()->NewCallRuntime(constructor, NULL, args);
return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
@@ -5053,8 +5046,10 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
- bool multiline)
+ bool multiline,
+ Zone* zone)
: isolate_(Isolate::Current()),
+ zone_(zone),
error_(error),
captures_(NULL),
in_(in),
@@ -5085,7 +5080,7 @@ void RegExpParser::Advance() {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (isolate()->zone()->excess_allocation()) {
+ } else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
current_ = in()->Get(next_pos_);
@@ -5150,7 +5145,7 @@ RegExpTree* RegExpParser::ParsePattern() {
// Atom Quantifier
RegExpTree* RegExpParser::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, 0);
+ RegExpParserState initial_state(NULL, INITIAL, 0, zone());
RegExpParserState* stored_state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
@@ -5235,8 +5230,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance();
// everything except \x0a, \x0d, \u2028 and \u2029
ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape('.', ranges);
+ new(zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape('.', ranges, zone());
RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
@@ -5262,17 +5257,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
} else {
if (captures_ == NULL) {
- captures_ = new(zone()) ZoneList<RegExpCapture*>(2);
+ captures_ = new(zone()) ZoneList<RegExpCapture*>(2, zone());
}
if (captures_started() >= kMaxCaptures) {
ReportError(CStrVector("Too many captures") CHECK_FAILED);
}
- captures_->Add(NULL);
+ captures_->Add(NULL, zone());
}
// Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state,
- type,
- captures_started());
+ stored_state = new(zone()) RegExpParserState(stored_state, type,
+ captures_started(), zone());
builder = stored_state->builder();
continue;
}
@@ -5306,8 +5300,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
uc32 c = Next();
Advance(2);
ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2);
- CharacterRange::AddClassEscape(c, ranges);
+ new(zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape(c, ranges, zone());
RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
builder->AddAtom(atom);
break;
@@ -5782,11 +5776,12 @@ static const uc16 kNoCharClass = 0;
// escape (i.e., 's' means whitespace, from '\s').
static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
uc16 char_class,
- CharacterRange range) {
+ CharacterRange range,
+ Zone* zone) {
if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges);
+ CharacterRange::AddClassEscape(char_class, ranges, zone);
} else {
- ranges->Add(range);
+ ranges->Add(range, zone);
}
}
@@ -5802,7 +5797,8 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
is_negated = true;
Advance();
}
- ZoneList<CharacterRange>* ranges = new(zone()) ZoneList<CharacterRange>(2);
+ ZoneList<CharacterRange>* ranges =
+ new(zone()) ZoneList<CharacterRange>(2, zone());
while (has_more() && current() != ']') {
uc16 char_class = kNoCharClass;
CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
@@ -5813,25 +5809,25 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// following code report an error.
break;
} else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
break;
}
uc16 char_class_2 = kNoCharClass;
CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
// Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first);
- ranges->Add(CharacterRange::Singleton('-'));
- AddRangeOrEscape(ranges, char_class_2, next);
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ AddRangeOrEscape(ranges, char_class_2, next, zone());
continue;
}
if (first.from() > next.to()) {
return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
}
- ranges->Add(CharacterRange::Range(first.from(), next.to()));
+ ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
} else {
- AddRangeOrEscape(ranges, char_class, first);
+ AddRangeOrEscape(ranges, char_class, first, zone());
}
}
if (!has_more()) {
@@ -5839,7 +5835,7 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
}
Advance();
if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything());
+ ranges->Add(CharacterRange::Everything(), zone());
is_negated = !is_negated;
}
return new(zone()) RegExpCharacterClass(ranges, is_negated);
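
A few concrete character classes and how the '-' handling above represents them, in the range/escape vocabulary of this file (sketch):

    //   [a-z]    => Range('a', 'z')               // plain range
    //   [\d-x]   => ClassEscape('d'), '-', 'x'    // escape on one side, so
    //                                             // '-' is taken verbatim
    //   [x-]     => 'x', '-'                      // '-' right before ']'
    //   []       => Everything(), negation flipped
    //                                             // an empty class matches
    //                                             // nothing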
@@ -5947,31 +5943,6 @@ static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
}
-// Preparse, but only collect data that is immediately useful,
-// even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
- v8::Extension* extension,
- int flags) {
- bool allow_lazy = FLAG_lazy && (extension == NULL);
- if (!allow_lazy) {
- // Partial preparsing is only about lazily compiled functions.
- // If we don't allow lazy compilation, the log data will be empty.
- return NULL;
- }
- flags |= kAllowLazy;
- PartialParserRecorder recorder;
- int source_length = source->length();
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source_length);
- return DoPreParse(&stream, flags, &recorder);
- } else {
- GenericStringUtf16CharacterStream stream(source, 0, source_length);
- return DoPreParse(&stream, flags, &recorder);
- }
-}
-
-
ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags) {
@@ -5986,9 +5957,10 @@ ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool multiline,
- RegExpCompileData* result) {
+ RegExpCompileData* result,
+ Zone* zone) {
ASSERT(result != NULL);
- RegExpParser parser(input, &result->error, multiline);
+ RegExpParser parser(input, &result->error, multiline, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
ASSERT(tree == NULL);
@@ -6009,7 +5981,6 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
ASSERT(info->function() == NULL);
FunctionLiteral* result = NULL;
- Handle<Script> script = info->script();
ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
if (!info->is_native() && FLAG_harmony_scoping) {
// Harmony scoping is requested.
@@ -6024,15 +5995,15 @@ bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
}
if (info->is_lazy()) {
ASSERT(!info->is_eval());
- Parser parser(script, parsing_flags, NULL, NULL);
+ Parser parser(info, parsing_flags, NULL, NULL);
if (info->shared_info()->is_function()) {
- result = parser.ParseLazy(info);
+ result = parser.ParseLazy();
} else {
- result = parser.ParseProgram(info);
+ result = parser.ParseProgram();
}
} else {
ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(script, parsing_flags, info->extension(), pre_data);
+ Parser parser(info, parsing_flags, info->extension(), pre_data);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
const char* message = pre_data->BuildMessage();
@@ -6045,7 +6016,7 @@ bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
DeleteArray(args.start());
ASSERT(info->isolate()->has_pending_exception());
} else {
- result = parser.ParseProgram(info);
+ result = parser.ParseProgram();
}
}
info->SetFunction(result);
diff --git a/src/3rdparty/v8/src/parser.h b/src/3rdparty/v8/src/parser.h
index b4d8825..93fd1b8 100644
--- a/src/3rdparty/v8/src/parser.h
+++ b/src/3rdparty/v8/src/parser.h
@@ -175,12 +175,6 @@ class ParserApi {
static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
v8::Extension* extension,
int flags);
-
- // Preparser that only does preprocessing that makes sense if only used
- // immediately after.
- static ScriptDataImpl* PartialPreParse(Handle<String> source,
- v8::Extension* extension,
- int flags);
};
// ----------------------------------------------------------------------------
@@ -200,12 +194,12 @@ class BufferedZoneList {
// Adds element at end of list. This element is buffered and can
// be read using last() or removed using RemoveLast until a new Add or until
// RemoveLast or GetList has been called.
- void Add(T* value) {
+ void Add(T* value, Zone* zone) {
if (last_ != NULL) {
if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
+ list_ = new(zone) ZoneList<T*>(initial_size, zone);
}
- list_->Add(last_);
+ list_->Add(last_, zone);
}
last_ = value;
}
@@ -250,12 +244,12 @@ class BufferedZoneList {
return length + ((last_ == NULL) ? 0 : 1);
}
- ZoneList<T*>* GetList() {
+ ZoneList<T*>* GetList(Zone* zone) {
if (list_ == NULL) {
- list_ = new ZoneList<T*>(initial_size);
+ list_ = new(zone) ZoneList<T*>(initial_size, zone);
}
if (last_ != NULL) {
- list_->Add(last_);
+ list_->Add(last_, zone);
last_ = NULL;
}
return list_;
@@ -270,7 +264,7 @@ class BufferedZoneList {
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder: public ZoneObject {
public:
- RegExpBuilder();
+ explicit RegExpBuilder(Zone* zone);
void AddCharacter(uc16 character);
// "Adds" an empty expression. Does nothing except consume a
// following quantifier
@@ -285,7 +279,7 @@ class RegExpBuilder: public ZoneObject {
void FlushCharacters();
void FlushText();
void FlushTerms();
- Zone* zone() { return zone_; }
+ Zone* zone() const { return zone_; }
Zone* zone_;
bool pending_empty_;
@@ -306,11 +300,13 @@ class RegExpParser {
public:
RegExpParser(FlatStringReader* in,
Handle<String>* error,
- bool multiline_mode);
+ bool multiline_mode,
+ Zone* zone);
static bool ParseRegExp(FlatStringReader* input,
bool multiline,
- RegExpCompileData* result);
+ RegExpCompileData* result,
+ Zone* zone);
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
@@ -368,9 +364,10 @@ class RegExpParser {
public:
RegExpParserState(RegExpParserState* previous_state,
SubexpressionType group_type,
- int disjunction_capture_index)
+ int disjunction_capture_index,
+ Zone* zone)
: previous_state_(previous_state),
- builder_(new RegExpBuilder()),
+ builder_(new(zone) RegExpBuilder(zone)),
group_type_(group_type),
disjunction_capture_index_(disjunction_capture_index) {}
// Parser state of containing expression, if any.
@@ -397,7 +394,7 @@ class RegExpParser {
};
Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() const { return zone_; }
uc32 current() { return current_; }
bool has_more() { return has_more_; }
@@ -407,6 +404,7 @@ class RegExpParser {
void ScanForCaptures();
Isolate* isolate_;
+ Zone* zone_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
FlatStringReader* in_;
@@ -430,7 +428,7 @@ class SingletonLogger;
class Parser {
public:
- Parser(Handle<Script> script,
+ Parser(CompilationInfo* info,
int parsing_flags, // Combination of ParsingFlags
v8::Extension* extension,
ScriptDataImpl* pre_data);
@@ -440,8 +438,8 @@ class Parser {
}
// Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram(CompilationInfo* info);
- FunctionLiteral* ParseLazy(CompilationInfo* info);
+ FunctionLiteral* ParseProgram();
+ FunctionLiteral* ParseLazy();
void ReportMessageAt(Scanner::Location loc,
const char* message,
@@ -456,7 +454,7 @@ class Parser {
// construct a hashable id, so if more than 2^17 are allowed, this
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
- static const int kMaxNumFunctionLocals = 32767;
+ static const int kMaxNumFunctionLocals = 131071; // 2^17-1
enum Mode {
PARSE_LAZILY,
@@ -538,15 +536,28 @@ class Parser {
AstNodeFactory<AstConstructionVisitor> factory_;
};
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(Parser* parser, Mode mode)
+ : parser_(parser),
+ old_mode_(parser->mode()) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() {
+ parser_->mode_ = old_mode_;
+ }
+ private:
+ Parser* parser_;
+ Mode old_mode_;
+ };
-
- FunctionLiteral* ParseLazy(CompilationInfo* info,
- Utf16CharacterStream* source,
+ FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
ZoneScope* zone_scope);
Isolate* isolate() { return isolate_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() const { return zone_; }
+ CompilationInfo* info() const { return info_; }
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(CompilationInfo* info,
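
ParsingModeScope above is plain RAII. A self-contained sketch of the same idiom (names are illustrative, not from the patch):

    #include <cassert>

    class ModeScope {
     public:
      ModeScope(int* slot, int new_mode) : slot_(slot), old_mode_(*slot) {
        *slot_ = new_mode;                    // swap the new mode in...
      }
      ~ModeScope() { *slot_ = old_mode_; }    // ...restore it on every exit
     private:
      int* slot_;
      int old_mode_;
    };

    void Demo() {
      int mode = 0;                 // stands in for Parser::mode_
      {
        ModeScope scope(&mode, 1);  // e.g. forcing PARSE_EAGERLY
        assert(mode == 1);
      }                             // destructor runs even on early returns
      assert(mode == 0);
    }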
@@ -568,7 +579,7 @@ class Parser {
return top_scope_->is_extended_mode();
}
Scope* DeclarationScope(VariableMode mode) {
- return (mode == LET || mode == CONST_HARMONY)
+ return IsLexicalVariableMode(mode)
? top_scope_ : top_scope_->DeclarationScope();
}
@@ -579,10 +590,10 @@ class Parser {
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
- void* ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token, bool is_eval, bool* ok);
+ void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token,
+ bool is_eval, bool is_global, bool* ok);
Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
- Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
+ Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
Module* ParseModule(bool* ok);
Module* ParseModuleLiteral(bool* ok);
Module* ParseModulePath(bool* ok);
@@ -767,7 +778,7 @@ class Parser {
// Parser support
VariableProxy* NewUnresolved(Handle<String> name,
VariableMode mode,
- Interface* interface = Interface::NewValue());
+ Interface* interface);
void Declare(Declaration* declaration, bool resolve, bool* ok);
bool TargetStackContainsLabel(Handle<String> label);
@@ -834,6 +845,8 @@ class Parser {
// so never lazily compile it.
bool parenthesized_function_;
+ Zone* zone_;
+ CompilationInfo* info_;
friend class BlockState;
friend class FunctionState;
};
diff --git a/src/3rdparty/v8/src/platform-cygwin.cc b/src/3rdparty/v8/src/platform-cygwin.cc
index 089ea38..24e256a 100644
--- a/src/3rdparty/v8/src/platform-cygwin.cc
+++ b/src/3rdparty/v8/src/platform-cygwin.cc
@@ -359,6 +359,12 @@ bool VirtualMemory::Guard(void* address) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
diff --git a/src/3rdparty/v8/src/platform-freebsd.cc b/src/3rdparty/v8/src/platform-freebsd.cc
index 511759c..1da4605 100644
--- a/src/3rdparty/v8/src/platform-freebsd.cc
+++ b/src/3rdparty/v8/src/platform-freebsd.cc
@@ -456,6 +456,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
pthread_t thread_; // Thread handle for pthread.
diff --git a/src/3rdparty/v8/src/platform-linux.cc b/src/3rdparty/v8/src/platform-linux.cc
index 18f59dd..e6c328f 100644
--- a/src/3rdparty/v8/src/platform-linux.cc
+++ b/src/3rdparty/v8/src/platform-linux.cc
@@ -53,6 +53,13 @@
#include <errno.h>
#include <stdarg.h>
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library's <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>
+#endif
+
#undef MAP_TYPE
#include "v8.h"
@@ -132,12 +139,18 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such.
switch (feature) {
+ case VFP2:
+ search_string = "vfp";
+ break;
case VFP3:
search_string = "vfpv3";
break;
case ARMv7:
search_string = "ARMv7";
break;
+ case SUDIV:
+ search_string = "idiva";
+ break;
default:
UNREACHABLE();
}
@@ -161,6 +174,23 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
}
+CpuImplementer OS::GetCpuImplementer() {
+ static bool use_cached_value = false;
+ static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
+ if (use_cached_value) {
+ return cached_value;
+ }
+ if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
+ cached_value = ARM_IMPLEMENTER;
+ } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
+ cached_value = QUALCOMM_IMPLEMENTER;
+ } else {
+ cached_value = UNKNOWN_IMPLEMENTER;
+ }
+ use_cached_value = true;
+ return cached_value;
+}
+
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
@@ -197,6 +227,7 @@ bool OS::ArmUsingHardFloat() {
#endif
#undef GCC_VERSION
}
+
#endif // def __arm__
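
The memoization shape used by GetCpuImplementer() above, as a standalone sketch; DetectImplementer is a hypothetical stand-in for the /proc/cpuinfo probe. Note the pattern is not strictly thread-safe and relies on the first call happening before sampling threads start:

    enum Implementer { UNKNOWN_IMPL, ARM_IMPL, QUALCOMM_IMPL };

    Implementer DetectImplementer();  // hypothetical /proc/cpuinfo probe

    Implementer GetImplementerCached() {
      static bool cached = false;
      static Implementer value = UNKNOWN_IMPL;
      if (!cached) {
        value = DetectImplementer();  // probe once...
        cached = true;                // ...then serve the cached answer
      }
      return value;
    }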
@@ -501,9 +532,6 @@ void OS::LogSharedLibraryAddresses() {
}
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
@@ -514,7 +542,7 @@ void OS::SignalCodeMovingGC() {
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
+ FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
void* addr = mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_EXEC,
@@ -693,6 +721,11 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ return true;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -903,32 +936,30 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
-// Android runs a fairly new Linux kernel, so signal info is there,
-// but the C library doesn't have the structs defined.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
-struct sigcontext {
- uint32_t trap_no;
- uint32_t error_code;
- uint32_t oldmask;
- uint32_t gregs[16];
- uint32_t arm_cpsr;
- uint32_t fault_address;
-};
-typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
+
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
-enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-#elif !defined(__GLIBC__) && defined(__mips__)
+#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
-struct sigcontext {
+typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
@@ -947,44 +978,44 @@ struct sigcontext {
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
-};
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
+} mcontext_t;
+
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
-#elif !defined(__GLIBC__) && defined(__i386__)
+#elif defined(__i386__)
// x86 version for Android.
-struct sigcontext {
+typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
-};
+} mcontext_t;
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
- __sigset_t uc_sigmask;
+ // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
+#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
static int GetThreadID() {
- // Glibc doesn't provide a wrapper for gettid(2).
-#if defined(ANDROID)
- return syscall(__NR_gettid);
+#if defined(__ANDROID__)
+ // Android's C library provides gettid(2).
+ return gettid();
#else
+ // Glibc doesn't provide a wrapper for gettid(2).
return syscall(SYS_gettid);
#endif
}
@@ -1023,8 +1054,10 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
-// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
-#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
+ (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ // Old GLibc ARM versions used a gregs[] array to access the register
+ // values from mcontext_t.
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
@@ -1032,7 +1065,8 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
+ // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(mcontext.pc);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
diff --git a/src/3rdparty/v8/src/platform-macos.cc b/src/3rdparty/v8/src/platform-macos.cc
index a937ed3..22d2bcf 100644
--- a/src/3rdparty/v8/src/platform-macos.cc
+++ b/src/3rdparty/v8/src/platform-macos.cc
@@ -471,6 +471,11 @@ bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -682,17 +687,27 @@ Mutex* OS::CreateMutex() {
class MacOSSemaphore : public Semaphore {
public:
explicit MacOSSemaphore(int count) {
- semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
+ int r;
+ r = semaphore_create(mach_task_self(),
+ &semaphore_,
+ SYNC_POLICY_FIFO,
+ count);
+ ASSERT(r == KERN_SUCCESS);
}
~MacOSSemaphore() {
- semaphore_destroy(mach_task_self(), semaphore_);
+ int r;
+ r = semaphore_destroy(mach_task_self(), semaphore_);
+ ASSERT(r == KERN_SUCCESS);
}
- // The MacOS mach semaphore documentation claims it does not have spurious
- // wakeups, the way pthreads semaphores do. So the code from the linux
- // platform is not needed here.
- void Wait() { semaphore_wait(semaphore_); }
+ void Wait() {
+ int r;
+ do {
+ r = semaphore_wait(semaphore_);
+ ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
+ } while (r == KERN_ABORTED);
+ }
bool Wait(int timeout);
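
The Wait() rewrite above is the Mach analogue of the classic EINTR retry loop. The same pattern for POSIX semaphores, as a sketch:

    #include <semaphore.h>
    #include <errno.h>

    // Keep waiting when the wait is interrupted by a signal. Mach reports
    // the interruption as KERN_ABORTED; POSIX as -1 with errno == EINTR.
    static void WaitUninterrupted(sem_t* sem) {
      int r;
      do {
        r = sem_wait(sem);
      } while (r == -1 && errno == EINTR);
    }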
diff --git a/src/3rdparty/v8/src/platform-nullos.cc b/src/3rdparty/v8/src/platform-nullos.cc
index 679ef8e..ccd2123 100644
--- a/src/3rdparty/v8/src/platform-nullos.cc
+++ b/src/3rdparty/v8/src/platform-nullos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -215,6 +215,11 @@ double OS::nan_value() {
}
+CpuImplementer OS::GetCpuImplementer() {
+ UNIMPLEMENTED();
+}
+
+
bool OS::ArmCpuHasFeature(CpuFeature feature) {
UNIMPLEMENTED();
}
@@ -335,6 +340,12 @@ bool VirtualMemory::Guard(void* address) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() {
diff --git a/src/3rdparty/v8/src/platform-openbsd.cc b/src/3rdparty/v8/src/platform-openbsd.cc
index ba33a84..292927b 100644
--- a/src/3rdparty/v8/src/platform-openbsd.cc
+++ b/src/3rdparty/v8/src/platform-openbsd.cc
@@ -323,9 +323,6 @@ void OS::LogSharedLibraryAddresses() {
}
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
@@ -336,7 +333,7 @@ void OS::SignalCodeMovingGC() {
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(kGCFakeMmap, "w+");
+ FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
@@ -507,6 +504,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
diff --git a/src/3rdparty/v8/src/platform-posix.cc b/src/3rdparty/v8/src/platform-posix.cc
index d942d78..3bc8373 100644
--- a/src/3rdparty/v8/src/platform-posix.cc
+++ b/src/3rdparty/v8/src/platform-posix.cc
@@ -153,6 +153,11 @@ double OS::nan_value() {
}
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(getpid());
+}
+
+
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
diff --git a/src/3rdparty/v8/src/platform-qnx.cc b/src/3rdparty/v8/src/platform-qnx.cc
index 46d69b8..e535756 100644
--- a/src/3rdparty/v8/src/platform-qnx.cc
+++ b/src/3rdparty/v8/src/platform-qnx.cc
@@ -125,12 +125,15 @@ static bool CPUInfoContainsString(const char * search_string) {
bool OS::ArmCpuHasFeature(CpuFeature feature) {
switch (feature) {
+ case VFP2:
case VFP3:
// All shipping devices currently support this and QNX has no easy way to
// determine this at runtime.
return true;
case ARMv7:
return (SYSPAGE_ENTRY(cpuinfo)->flags & ARM_CPU_FLAG_V7) != 0;
+ case SUDIV:
+ return CPUInfoContainsString("idiva");
default:
UNREACHABLE();
}
@@ -138,6 +141,12 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
return false;
}
+CpuImplementer OS::GetCpuImplementer() {
+ // We do NOT return QUALCOMM_IMPLEMENTER, even though /proc/cpuinfo
+ // has "CPU implementer : 0x51" in it, as that leads to a runtime
+ // error on the first JS function call.
+ return UNKNOWN_IMPLEMENTER;
+}
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
@@ -377,7 +386,7 @@ void OS::LogSharedLibraryAddresses() {
return;
}
- /* Get the number of map entrys. */
+ /* Get the number of map entries. */
if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
close(proc_fd);
return;
@@ -389,7 +398,7 @@ void OS::LogSharedLibraryAddresses() {
return;
}
- /* Fill the map entrys. */
+ /* Fill the map entries. */
if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
free(mapinfos);
close(proc_fd);
@@ -595,6 +604,10 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
class Thread::PlatformData : public Malloced {
public:
@@ -980,7 +993,7 @@ class SignalSender : public Thread {
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
+ // occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
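
The SUDIV branch added in the QNX hunk relies on CPUInfoContainsString scanning /proc/cpuinfo for a feature token ("idiva"). A simplified standalone sketch of such a probe, assuming line-oriented cpuinfo output:

    #include <cstdio>
    #include <cstring>

    // Returns true if any line of /proc/cpuinfo contains the token.
    bool CpuInfoContainsSketch(const char* token) {
      FILE* f = fopen("/proc/cpuinfo", "r");
      if (f == NULL) return false;
      char line[512];
      bool found = false;
      while (!found && fgets(line, sizeof(line), f) != NULL)
        found = strstr(line, token) != NULL;
      fclose(f);
      return found;
    }
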
diff --git a/src/3rdparty/v8/src/platform-solaris.cc b/src/3rdparty/v8/src/platform-solaris.cc
index 4248ea2..5652741 100644
--- a/src/3rdparty/v8/src/platform-solaris.cc
+++ b/src/3rdparty/v8/src/platform-solaris.cc
@@ -125,12 +125,8 @@ const char* OS::LocalTimezone(double time) {
double OS::LocalTimeOffset() {
- // On Solaris, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+ tzset();
+ return -static_cast<double>(timezone * msPerSecond);
}
@@ -448,6 +444,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) { }
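
The rewritten Solaris LocalTimeOffset() replaces the mktime/localtime round trip with the globals that tzset() populates. A minimal sketch: the POSIX `timezone` global holds seconds west of UTC, so negating it yields the offset east of UTC, scaled to milliseconds.

    #include <ctime>

    // Assumes the POSIX `timezone` global (seconds west of UTC).
    double LocalTimeOffsetMsSketch() {
      tzset();
      return -static_cast<double>(timezone) * 1000.0;
    }
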
diff --git a/src/3rdparty/v8/src/platform-tls-win32.h b/src/3rdparty/v8/src/platform-tls-win32.h
index 4056e8c..a981d18 100644
--- a/src/3rdparty/v8/src/platform-tls-win32.h
+++ b/src/3rdparty/v8/src/platform-tls-win32.h
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-#if defined(_WIN32) && !defined(_WIN64)
+#if defined(_WIN32) && !defined(_WIN64) && !defined(_WIN32_WCE)
#define V8_FAST_TLS_SUPPORTED 1
diff --git a/src/3rdparty/v8/src/platform-win32.cc b/src/3rdparty/v8/src/platform-win32.cc
index ba57803..76e35f5 100644
--- a/src/3rdparty/v8/src/platform-win32.cc
+++ b/src/3rdparty/v8/src/platform-win32.cc
@@ -58,6 +58,20 @@ int strncasecmp(const char* s1, const char* s2, int n) {
#endif // _MSC_VER
+#ifdef _WIN32_WCE
+// Converts a Latin-1 string into a null-terminated UTF-16 string.
+// The caller owns the result and must release it with delete[].
+wchar_t* wce_mbtowc(const char* a) {
+  int length = strlen(a);
+  wchar_t* wbuf = new wchar_t[length + 1];
+
+  for (int i = 0; i < length; ++i)
+    wbuf[i] = static_cast<wchar_t>(static_cast<unsigned char>(a[i]));
+
+  wbuf[length] = L'\0';
+  return wbuf;
+}
+#endif // _WIN32_WCE
+
+
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
#ifdef __MINGW32__
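
A self-contained restatement of the widening helper above, under the same assumption that the input is Latin-1; the unsigned-char round trip guards against sign extension for bytes at or above 0x80, which would otherwise corrupt the UTF-16 output.

    #include <cstring>

    wchar_t* WidenLatin1(const char* s) {
      size_t length = strlen(s);
      wchar_t* wbuf = new wchar_t[length + 1];
      for (size_t i = 0; i < length; ++i)
        wbuf[i] = static_cast<wchar_t>(static_cast<unsigned char>(s[i]));
      wbuf[length] = L'\0';
      return wbuf;  // Caller releases with delete[].
    }
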
@@ -162,6 +176,13 @@ void OS::MemCopy(void* dest, const void* src, size_t size) {
}
#endif // V8_TARGET_ARCH_IA32
+#ifdef _WIN32_WCE
+// TODO: Implement CPU implementer detection for Windows CE.
+CpuImplementer OS::GetCpuImplementer() {
+ return UNKNOWN_IMPLEMENTER;
+}
+#endif // _WIN32_WCE
+
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
static ModuloFunction modulo_function = NULL;
@@ -377,7 +398,9 @@ void Time::TzSet() {
if (tz_initialized_) return;
// Initialize POSIX time zone data.
+#ifndef _WIN32_WCE
_tzset();
+#endif // _WIN32_WCE
// Obtain timezone information from operating system.
memset(&tzinfo_, 0, sizeof(tzinfo_));
if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
@@ -489,6 +512,7 @@ void Time::SetToCurrentTime() {
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
int64_t Time::LocalOffset() {
+#ifndef _WIN32_WCE
// Initialize timezone information, if needed.
TzSet();
@@ -519,6 +543,11 @@ int64_t Time::LocalOffset() {
} else {
return tzinfo_.Bias * -kMsPerMinute;
}
+#else
+  // Windows CE handles time zones differently from desktop Windows.
+  // TODO: Adapt this function for Windows CE.
+ return 0;
+#endif
}
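
For reference, the desktop branch above derives the offset from GetTimeZoneInformation(). A simplified standalone sketch (error handling reduced; the real code caches tzinfo_ and distinguishes more cases):

    #include <windows.h>
    #include <cstdint>

    // Returns the local offset in milliseconds east of UTC; the Win32 Bias
    // fields are expressed in minutes west of UTC.
    int64_t LocalOffsetMsSketch() {
      TIME_ZONE_INFORMATION tzinfo;
      DWORD rc = GetTimeZoneInformation(&tzinfo);
      if (rc == TIME_ZONE_ID_INVALID) return 0;
      LONG bias = tzinfo.Bias;
      if (rc == TIME_ZONE_ID_DAYLIGHT) bias += tzinfo.DaylightBias;
      else if (rc == TIME_ZONE_ID_STANDARD) bias += tzinfo.StandardBias;
      return -static_cast<int64_t>(bias) * 60 * 1000;
    }
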
@@ -570,6 +599,14 @@ void OS::PostSetUp() {
#endif
}
+#ifdef V8_TARGET_ARCH_ARM
+// TODO: Implement ARM CPU feature detection for Windows CE.
+// Windows CE is currently the only Windows target that runs on ARM.
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ return false;
+}
+#endif // V8_TARGET_ARCH_ARM
+
// Returns the accumulated user time for thread.
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
@@ -634,6 +671,11 @@ int OS::GetLastError() {
}
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(::GetCurrentProcessId());
+}
+
+
// ----------------------------------------------------------------------------
// Win32 console output.
//
@@ -659,6 +701,7 @@ static OutputMode output_mode = UNKNOWN; // Current output mode.
static bool HasConsole() {
// Only check the first time. Eventual race conditions are not a problem,
// because all threads will eventually determine the same mode.
+#ifndef _WIN32_WCE
if (output_mode == UNKNOWN) {
// We cannot just check that the standard output is attached to a console
// because this would fail if output is redirected to a file. Therefore we
@@ -671,6 +714,10 @@ static bool HasConsole() {
output_mode = ODS;
}
return output_mode == CONSOLE;
+#else
+  // Windows CE has no shell enabled in the standard BSP, so assume no console.
+ return false;
+#endif // _WIN32_WCE
}
@@ -683,7 +730,14 @@ static void VPrintHelper(FILE* stream, const char* format, va_list args) {
// does not crash.
EmbeddedVector<char, 4096> buffer;
OS::VSNPrintF(buffer, format, args);
+#ifdef _WIN32_WCE
+  wchar_t wbuf[4096];
+  // Widen up to and including the terminating NUL; copying all 4096
+  // characters would read past the end of the formatted string.
+  for (int i = 0; i < 4096; ++i) {
+    wbuf[i] = static_cast<wchar_t>(buffer.start()[i]);
+    if (wbuf[i] == L'\0') break;
+  }
+ OutputDebugStringW(wbuf);
+#else
OutputDebugStringA(buffer.start());
+#endif // _WIN32_WCE
}
}
@@ -699,23 +753,30 @@ FILE* OS::FOpen(const char* path, const char* mode) {
bool OS::Remove(const char* path) {
+#ifndef _WIN32_WCE
return (DeleteFileA(path) != 0);
+#else
+  wchar_t* wpath = wce_mbtowc(path);
+  bool ret = (DeleteFileW(wpath) != 0);
+  delete[] wpath;
+ return ret;
+#endif // _WIN32_WCE
}
FILE* OS::OpenTemporaryFile() {
// tmpfile_s tries to use the root dir, don't use it.
- char tempPathBuffer[MAX_PATH];
+ wchar_t tempPathBuffer[MAX_PATH];
DWORD path_result = 0;
- path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+ path_result = GetTempPathW(MAX_PATH, tempPathBuffer);
if (path_result > MAX_PATH || path_result == 0) return NULL;
UINT name_result = 0;
- char tempNameBuffer[MAX_PATH];
- name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+ wchar_t tempNameBuffer[MAX_PATH];
+ name_result = GetTempFileNameW(tempPathBuffer, L"", 0, tempNameBuffer);
if (name_result == 0) return NULL;
- FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
+ FILE* result = _wfopen(tempNameBuffer, L"w+"); // Same mode as tmpfile uses.
if (result != NULL) {
- Remove(tempNameBuffer); // Delete on close.
+ DeleteFileW(tempNameBuffer); // Delete on close.
}
return result;
}
@@ -969,7 +1030,11 @@ void OS::Abort() {
DebugBreak();
} else {
// Make the MSVCRT do a silent abort.
+#ifndef _WIN32_WCE
raise(SIGABRT);
+#else
+ exit(3);
+#endif // _WIN32_WCE
}
}
@@ -1006,8 +1071,15 @@ class Win32MemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
+#ifndef _WIN32_WCE
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+#else
+  wchar_t* wname = wce_mbtowc(name);
+ HANDLE file = CreateFileW(wname, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+  delete[] wname;
+#endif // _WIN32_WCE
if (file == INVALID_HANDLE_VALUE) return NULL;
int size = static_cast<int>(GetFileSize(file, NULL));
@@ -1026,8 +1098,15 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
// Open a physical file
+#ifndef _WIN32_WCE
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+#else
+  wchar_t* wname = wce_mbtowc(name);
+ HANDLE file = CreateFileW(wname, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+  delete[] wname;
+#endif // _WIN32_WCE
if (file == NULL) return NULL;
// Create a file mapping for the physical file
HANDLE file_mapping = CreateFileMapping(file, NULL,
@@ -1089,8 +1168,8 @@ Win32MemoryMappedFile::~Win32MemoryMappedFile() {
#define VOID void
#endif
-// DbgHelp isn't supported on MinGW yet
-#ifndef __MINGW32__
+// DbgHelp isn't supported on MinGW yet, nor does Windows CE have it
+#if !defined(__MINGW32__) && !defined(_WIN32_WCE)
// DbgHelp.h functions.
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
IN PSTR UserSearchPath,
@@ -1561,6 +1640,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
// Win32 thread support.
@@ -1613,6 +1698,7 @@ Thread::~Thread() {
// the Win32 function CreateThread(), because the CreateThread() does not
// initialize thread specific structures in the C runtime library.
void Thread::Start() {
+#ifndef _WIN32_WCE
data_->thread_ = reinterpret_cast<HANDLE>(
_beginthreadex(NULL,
static_cast<unsigned>(stack_size_),
@@ -1620,6 +1706,18 @@ void Thread::Start() {
this,
0,
&data_->thread_id_));
+#else
+ unsigned initflag = 0;
+ if (stack_size_ > 0)
+ initflag |= STACK_SIZE_PARAM_IS_A_RESERVATION;
+ data_->thread_ = reinterpret_cast<HANDLE>(
+      CreateThread(NULL,
+                   static_cast<unsigned>(stack_size_),
+                   reinterpret_cast<LPTHREAD_START_ROUTINE>(ThreadEntry),
+                   this,
+                   initflag,
+                   reinterpret_cast<LPDWORD>(&data_->thread_id_)));
+#endif // _WIN32_WCE
}
@@ -1714,7 +1812,7 @@ Mutex* OS::CreateMutex() {
class Win32Semaphore : public Semaphore {
public:
explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+ sem = ::CreateSemaphoreW(NULL, count, 0x7fffffff, NULL);
}
~Win32Semaphore() {
@@ -2062,10 +2160,17 @@ class SamplerThread : public Thread {
sample->pc = reinterpret_cast<Address>(context.Rip);
sample->sp = reinterpret_cast<Address>(context.Rsp);
sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
+#elif V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(context.Eip);
sample->sp = reinterpret_cast<Address>(context.Esp);
sample->fp = reinterpret_cast<Address>(context.Ebp);
+#elif V8_HOST_ARCH_ARM
+ // Taken from http://msdn.microsoft.com/en-us/library/aa448762.aspx
+ sample->pc = reinterpret_cast<Address>(context.Pc);
+ sample->sp = reinterpret_cast<Address>(context.Sp);
+ sample->fp = reinterpret_cast<Address>(context.R11);
+#else
+#error This platform is not supported.
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
diff --git a/src/3rdparty/v8/src/platform.h b/src/3rdparty/v8/src/platform.h
index a9464de..f3ab08d 100644
--- a/src/3rdparty/v8/src/platform.h
+++ b/src/3rdparty/v8/src/platform.h
@@ -71,6 +71,24 @@ int signbit(double x);
int strncasecmp(const char* s1, const char* s2, int n);
+inline int lrint(double flt) {
+ int intgr;
+#if defined(V8_TARGET_ARCH_IA32)
+ __asm {
+ fld flt
+ fistp intgr
+ };
+#else
+ intgr = static_cast<int>(flt + 0.5);
+ if ((intgr & 1) != 0 && intgr - flt == 0.5) {
+ // If the number is halfway between two integers, round to the even one.
+ intgr--;
+ }
+#endif
+ return intgr;
+}
+
+
#endif // _MSC_VER
// Random is missing on both Visual Studio and MinGW.
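
The non-IA32 fallback in the new lrint() implements round-half-to-even by hand. A quick standalone check of that branch:

    #include <cstdio>

    int LrintFallbackSketch(double flt) {
      int intgr = static_cast<int>(flt + 0.5);
      // Halfway cases land on the odd neighbor; step back to the even one.
      if ((intgr & 1) != 0 && intgr - flt == 0.5) intgr--;
      return intgr;
    }

    int main() {
      // Prints "0 2 2": 0.5 and 2.5 round to even, 1.5 rounds up.
      printf("%d %d %d\n", LrintFallbackSketch(0.5),
             LrintFallbackSketch(1.5), LrintFallbackSketch(2.5));
      return 0;
    }
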
@@ -89,7 +107,11 @@ namespace internal {
// Use AtomicWord for a machine-sized pointer. It is assumed that
// reads and writes of naturally aligned values of this type are atomic.
+#if defined(__OpenBSD__) && defined(__i386__)
+typedef Atomic32 AtomicWord;
+#else
typedef intptr_t AtomicWord;
+#endif
class Semaphore;
class Mutex;
@@ -286,6 +308,9 @@ class OS {
// Returns the double constant NAN
static double nan_value();
+  // Support runtime detection of the CPU implementer.
+ static CpuImplementer GetCpuImplementer();
+
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
@@ -317,6 +342,8 @@ class OS {
static const int kMinComplexMemCopy = 256;
#endif // V8_TARGET_ARCH_IA32
+ static int GetCurrentProcessId();
+
private:
static const int msPerSecond = 1000;
@@ -405,6 +432,11 @@ class VirtualMemory {
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
+  // Returns true if the OS performs lazy commits, i.e. the memory allocation
+  // call defers actual physical memory allocation until the first memory
+  // access. Otherwise returns false.
+ static bool HasLazyCommits();
+
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
diff --git a/src/3rdparty/v8/src/preparser.cc b/src/3rdparty/v8/src/preparser.cc
index 0c17eec..21da4f8 100644
--- a/src/3rdparty/v8/src/preparser.cc
+++ b/src/3rdparty/v8/src/preparser.cc
@@ -602,14 +602,17 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
if (token == i::Token::CASE) {
Expect(i::Token::CASE, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
- } else if (token == i::Token::DEFAULT) {
- Expect(i::Token::DEFAULT, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
} else {
- ParseStatement(CHECK_OK);
+ Expect(i::Token::DEFAULT, CHECK_OK);
}
+ Expect(i::Token::COLON, CHECK_OK);
token = peek();
+ while (token != i::Token::CASE &&
+ token != i::Token::DEFAULT &&
+ token != i::Token::RBRACE) {
+ ParseStatement(CHECK_OK);
+ token = peek();
+ }
}
Expect(i::Token::RBRACE, ok);
return Statement::Default();
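
The preparser fix restructures clause parsing: one Expect per label, a shared COLON, then an inner loop that consumes statements until the next label or the closing brace. A heavily simplified C++ model of the new control flow over a flat token stream (Token and the stream are stand-ins, not preparser types):

    #include <cassert>
    #include <vector>

    enum Token { CASE, DEFAULT, COLON, STMT, RBRACE };

    void ParseSwitchBodySketch(const std::vector<Token>& toks) {
      size_t i = 0;
      while (toks[i] != RBRACE) {
        assert(toks[i] == CASE || toks[i] == DEFAULT);  // Label keyword.
        ++i;                                  // (CASE expression elided.)
        assert(toks[i] == COLON);
        ++i;
        // Inner loop: statements until the next label or '}'.
        while (toks[i] != CASE && toks[i] != DEFAULT && toks[i] != RBRACE)
          ++i;                                // Stands in for ParseStatement().
      }
    }
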
diff --git a/src/3rdparty/v8/src/profile-generator-inl.h b/src/3rdparty/v8/src/profile-generator-inl.h
index 9afc52f..02e146f 100644
--- a/src/3rdparty/v8/src/profile-generator-inl.h
+++ b/src/3rdparty/v8/src/profile-generator-inl.h
@@ -84,6 +84,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
return gc_entry_;
case JS:
case COMPILER:
+ case PARALLEL_COMPILER_PROLOGUE:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
@@ -118,32 +119,12 @@ int HeapEntry::set_children_index(int index) {
}
-int HeapEntry::set_retainers_index(int index) {
- retainers_index_ = index;
- int next_index = index + retainers_count_;
- retainers_count_ = 0;
- return next_index;
-}
-
-
HeapGraphEdge** HeapEntry::children_arr() {
ASSERT(children_index_ >= 0);
return &snapshot_->children()[children_index_];
}
-HeapGraphEdge** HeapEntry::retainers_arr() {
- ASSERT(retainers_index_ >= 0);
- return &snapshot_->retainers()[retainers_index_];
-}
-
-
-HeapEntry* HeapEntry::dominator() const {
- ASSERT(dominator_ >= 0);
- return &snapshot_->entries()[dominator_];
-}
-
-
SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
return kGcRootsFirstSubrootId + delta * kObjectIdStep;
}
diff --git a/src/3rdparty/v8/src/profile-generator.cc b/src/3rdparty/v8/src/profile-generator.cc
index da2a969..9839edf 100644
--- a/src/3rdparty/v8/src/profile-generator.cc
+++ b/src/3rdparty/v8/src/profile-generator.cc
@@ -169,6 +169,15 @@ const char* StringsStorage::GetName(int index) {
}
+size_t StringsStorage::GetUsedMemorySize() const {
+ size_t size = sizeof(*this);
+ size += sizeof(HashMap::Entry) * names_.capacity();
+ for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
+ }
+ return size;
+}
+
const char* const CodeEntry::kEmptyNamePrefix = "";
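
The new StringsStorage::GetUsedMemorySize() counts the object header, the hash table's entry capacity, and each owned C string. The same accounting pattern on a standard container, as a sketch (node overhead is approximated by the pair size):

    #include <cstring>
    #include <map>

    struct NameStoreSketch {
      std::map<int, const char*> names;  // Values are owned C strings.
      size_t GetUsedMemorySize() const {
        size_t size = sizeof(*this);
        for (std::map<int, const char*>::const_iterator it = names.begin();
             it != names.end(); ++it) {
          size += sizeof(*it) + strlen(it->second) + 1;
        }
        return size;
      }
    };
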
@@ -964,16 +973,10 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
const char* name,
SnapshotObjectId id,
int self_size)
- : painted_(false),
- user_reachable_(false),
- dominator_(kNoEntry),
- type_(type),
- retainers_count_(0),
- retainers_index_(-1),
+ : type_(type),
children_count_(0),
children_index_(-1),
self_size_(self_size),
- retained_size_(0),
id_(id),
snapshot_(snapshot),
name_(name) { }
@@ -985,7 +988,6 @@ void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
HeapGraphEdge edge(type, name, this->index(), entry->index());
snapshot_->edges().Add(edge);
++children_count_;
- ++entry->retainers_count_;
}
@@ -995,7 +997,6 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
HeapGraphEdge edge(type, index, this->index(), entry->index());
snapshot_->edges().Add(edge);
++children_count_;
- ++entry->retainers_count_;
}
@@ -1007,9 +1008,8 @@ Handle<HeapObject> HeapEntry::GetHeapObject() {
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d %7d @%6u %*c %s%s: ",
- self_size(), retained_size(), id(),
- indent, ' ', prefix, edge_name);
+ OS::Print("%6d @%6u %*c %s%s: ",
+ self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1091,13 +1091,17 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 40;
+ static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapSnapshotsCollectionSize = 96;
+ static const int kExpectedHeapSnapshotSize = 136;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 48;
+ static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapSnapshotsCollectionSize = 144;
+ static const int kExpectedHeapSnapshotSize = 168;
static const uint64_t kMaxSerializableSnapshotRawSize =
static_cast<uint64_t>(6000) * MB;
};
@@ -1139,16 +1143,6 @@ void HeapSnapshot::RememberLastJSObjectId() {
}
-static void HeapEntryClearPaint(HeapEntry* entry_ptr) {
- entry_ptr->clear_paint();
-}
-
-
-void HeapSnapshot::ClearPaint() {
- entries_.Iterate(HeapEntryClearPaint);
-}
-
-
HeapEntry* HeapSnapshot::AddRootEntry() {
ASSERT(root_index_ == HeapEntry::kNoEntry);
ASSERT(entries_.is_empty()); // Root entry must be the first one.
@@ -1196,32 +1190,19 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
}
-void HeapSnapshot::FillChildrenAndRetainers() {
+void HeapSnapshot::FillChildren() {
ASSERT(children().is_empty());
children().Allocate(edges().length());
- ASSERT(retainers().is_empty());
- retainers().Allocate(edges().length());
int children_index = 0;
- int retainers_index = 0;
for (int i = 0; i < entries().length(); ++i) {
HeapEntry* entry = &entries()[i];
children_index = entry->set_children_index(children_index);
- retainers_index = entry->set_retainers_index(retainers_index);
}
ASSERT(edges().length() == children_index);
- ASSERT(edges().length() == retainers_index);
for (int i = 0; i < edges().length(); ++i) {
HeapGraphEdge* edge = &edges()[i];
edge->ReplaceToIndexWithEntry(this);
edge->from()->add_child(edge);
- edge->to()->add_retainer(edge);
- }
-}
-
-
-void HeapSnapshot::SetDominatorsToSelf() {
- for (int i = 0; i < entries_.length(); ++i) {
- entries_[i].set_dominator(&entries_[i]);
}
}
@@ -1275,16 +1256,18 @@ void HeapSnapshot::Print(int max_depth) {
template<typename T, class P>
static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.capacity() * sizeof(T);
+ return list.length() * sizeof(T) + sizeof(list);
}
size_t HeapSnapshot::RawSnapshotSize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
+ sizeof(HeapSnapshot)); // NOLINT
return
+ sizeof(*this) +
GetMemoryUsedByList(entries_) +
GetMemoryUsedByList(edges_) +
GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(retainers_) +
GetMemoryUsedByList(sorted_entries_);
}
@@ -1390,7 +1373,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
}
-void HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
+SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
UpdateHeapObjectsMap();
time_intervals_.Add(TimeInterval(next_id_));
int prefered_chunk_size = stream->GetChunkSize();
@@ -1420,7 +1403,7 @@ void HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
if (stats_buffer.length() >= prefered_chunk_size) {
OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
&stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return;
+ if (result == OutputStream::kAbort) return last_assigned_id();
stats_buffer.Clear();
}
}
@@ -1429,9 +1412,10 @@ void HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
if (!stats_buffer.is_empty()) {
OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
&stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return;
+ if (result == OutputStream::kAbort) return last_assigned_id();
}
stream->EndOfStream();
+ return last_assigned_id();
}
@@ -1478,6 +1462,15 @@ SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
}
+size_t HeapObjectsMap::GetUsedMemorySize() const {
+ return
+ sizeof(*this) +
+ sizeof(HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(time_intervals_);
+}
+
+
HeapSnapshotsCollection::HeapSnapshotsCollection()
: is_tracking_objects_(false),
snapshots_uids_(HeapSnapshotsMatch),
@@ -1557,6 +1550,22 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
}
+size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
+ kExpectedHeapSnapshotsCollectionSize ==
+ sizeof(HeapSnapshotsCollection)); // NOLINT
+ size_t size = sizeof(*this);
+ size += names_.GetUsedMemorySize();
+ size += ids_.GetUsedMemorySize();
+ size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
+}
+
+
HeapEntriesMap::HeapEntriesMap()
: entries_(HeapThingsMatch) {
}
@@ -1702,8 +1711,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
name->IsString()
? collection_->names()->GetName(String::cast(name))
: "");
- } else if (object->IsGlobalContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / GlobalContext");
+ } else if (object->IsNativeContext()) {
+ return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
} else if (object->IsContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / Context");
} else if (object->IsFixedArray() ||
@@ -1937,8 +1946,8 @@ void V8HeapExplorer::ExtractJSObjectReferences(
"builtins", global_obj->builtins(),
GlobalObject::kBuiltinsOffset);
SetInternalReference(global_obj, entry,
- "global_context", global_obj->global_context(),
- GlobalObject::kGlobalContextOffset);
+ "native_context", global_obj->native_context(),
+ GlobalObject::kNativeContextOffset);
SetInternalReference(global_obj, entry,
"global_receiver", global_obj->global_receiver(),
GlobalObject::kGlobalReceiverOffset);
@@ -1973,17 +1982,17 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_INDEX, GlobalObject, global);
- if (context->IsGlobalContext()) {
+ EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ if (context->IsNativeContext()) {
TagObject(context->jsfunction_result_caches(),
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->data(), "(context data)");
- GLOBAL_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+ NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
#undef EXTRACT_CONTEXT_FIELD
for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::GLOBAL_CONTEXT_SLOTS;
+ i < Context::NATIVE_CONTEXT_SLOTS;
++i) {
SetWeakReference(context, entry, i, context->get(i),
FixedArray::OffsetOfElementAt(i));
@@ -1998,22 +2007,34 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
SetInternalReference(map, entry,
"constructor", map->constructor(),
Map::kConstructorOffset);
- if (!map->instance_descriptors()->IsEmpty()) {
- TagObject(map->instance_descriptors(), "(map descriptors)");
+ if (map->HasTransitionArray()) {
+ TransitionArray* transitions = map->transitions();
+
+ Object* back_pointer = transitions->back_pointer_storage();
+ TagObject(transitions->back_pointer_storage(), "(back pointer)");
+ SetInternalReference(transitions, entry,
+ "backpointer", back_pointer,
+ TransitionArray::kBackPointerStorageOffset);
+ IndexedReferencesExtractor transitions_refs(this, transitions, entry);
+ transitions->Iterate(&transitions_refs);
+
+ TagObject(transitions, "(transition array)");
SetInternalReference(map, entry,
- "descriptors", map->instance_descriptors(),
- Map::kInstanceDescriptorsOrBitField3Offset);
- }
- if (map->unchecked_prototype_transitions()->IsFixedArray()) {
- TagObject(map->prototype_transitions(), "(prototype transitions)");
- SetInternalReference(map, entry,
- "prototype_transitions", map->prototype_transitions(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
+ "transitions", transitions,
+ Map::kTransitionsOrBackPointerOffset);
} else {
+ Object* back_pointer = map->GetBackPointer();
+ TagObject(back_pointer, "(back pointer)");
SetInternalReference(map, entry,
- "back_pointer", map->GetBackPointer(),
- Map::kPrototypeTransitionsOrBackPointerOffset);
+ "backpointer", back_pointer,
+ Map::kTransitionsOrBackPointerOffset);
}
+ DescriptorArray* descriptors = map->instance_descriptors();
+ TagObject(descriptors, "(map descriptors)");
+ SetInternalReference(map, entry,
+ "descriptors", descriptors,
+ Map::kDescriptorsOffset);
+
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
@@ -2169,20 +2190,37 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ int real_size = js_obj->map()->NumberOfOwnDescriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ if (descs->GetDetails(i).descriptor_index() > real_size) continue;
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
+
+ String* k = descs->GetKey(i);
if (index < js_obj->map()->inobject_properties()) {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->InObjectPropertyAt(index),
- NULL,
- js_obj->GetInObjectPropertyOffset(index));
+ Object* value = js_obj->InObjectPropertyAt(index);
+ if (k != heap_->hidden_symbol()) {
+ SetPropertyReference(
+ js_obj, entry,
+ k, value,
+ NULL,
+ js_obj->GetInObjectPropertyOffset(index));
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(
+ js_obj, entry,
+ "hidden_properties", value,
+ js_obj->GetInObjectPropertyOffset(index));
+ }
} else {
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), js_obj->FastPropertyAt(index));
+ Object* value = js_obj->FastPropertyAt(index);
+ if (k != heap_->hidden_symbol()) {
+ SetPropertyReference(js_obj, entry, k, value);
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ }
}
break;
}
@@ -2209,10 +2247,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
case NORMAL: // only in slow mode
case HANDLER: // only in lookup results, not in descriptors
case INTERCEPTOR: // only in lookup results, not in descriptors
- case MAP_TRANSITION: // we do not care about transitions here...
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR: // ... and not about "holes"
+ break;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
@@ -2227,7 +2265,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
Object* value = target->IsJSGlobalPropertyCell()
? JSGlobalPropertyCell::cast(target)->value()
: target;
- if (String::cast(k)->length() > 0) {
+ if (k != heap_->hidden_symbol()) {
SetPropertyReference(js_obj, entry, String::cast(k), value);
} else {
TagObject(value, "(hidden properties)");
@@ -2240,7 +2278,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastElements()) {
+ if (js_obj->HasFastObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray() ?
Smi::cast(JSArray::cast(js_obj)->length())->value() :
@@ -2286,11 +2324,12 @@ String* V8HeapExplorer::GetConstructorName(JSObject* object) {
Object* constructor_prop = NULL;
LookupResult result(heap->isolate());
object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
- if (result.IsProperty()) {
- constructor_prop = result.GetLazyValue();
- }
+ if (!result.IsFound()) return object->constructor_name();
+
+ constructor_prop = result.GetLazyValue();
if (constructor_prop->IsJSFunction()) {
- Object* maybe_name = JSFunction::cast(constructor_prop)->shared()->name();
+ Object* maybe_name =
+ JSFunction::cast(constructor_prop)->shared()->name();
if (maybe_name->IsString()) {
String* name = String::cast(maybe_name);
if (name->length() > 0) return name;
@@ -2404,19 +2443,17 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool V8HeapExplorer::IsEssentialObject(Object* object) {
- // We have to use raw_unchecked_* versions because checked versions
- // would fail during iteration over object properties.
return object->IsHeapObject()
&& !object->IsOddball()
- && object != heap_->raw_unchecked_empty_byte_array()
- && object != heap_->raw_unchecked_empty_fixed_array()
- && object != heap_->raw_unchecked_empty_descriptor_array()
- && object != heap_->raw_unchecked_fixed_array_map()
- && object != heap_->raw_unchecked_global_property_cell_map()
- && object != heap_->raw_unchecked_shared_function_info_map()
- && object != heap_->raw_unchecked_free_space_map()
- && object != heap_->raw_unchecked_one_pointer_filler_map()
- && object != heap_->raw_unchecked_two_pointer_filler_map();
+ && object != heap_->empty_byte_array()
+ && object != heap_->empty_fixed_array()
+ && object != heap_->empty_descriptor_array()
+ && object != heap_->fixed_array_map()
+ && object != heap_->global_property_cell_map()
+ && object != heap_->shared_function_info_map()
+ && object != heap_->free_space_map()
+ && object != heap_->one_pointer_filler_map()
+ && object != heap_->two_pointer_filler_map();
}
@@ -2552,20 +2589,6 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
}
-void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_entry);
- }
-}
-
-
void V8HeapExplorer::SetRootGcRootsReference() {
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
@@ -2646,7 +2669,7 @@ class GlobalObjectsEnumerator : public ObjectVisitor {
public:
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- if ((*p)->IsGlobalContext()) {
+ if ((*p)->IsNativeContext()) {
Context* context = Context::cast(*p);
JSObject* proxy = context->global_proxy();
if (proxy->IsJSGlobalProxy()) {
@@ -2684,6 +2707,10 @@ void V8HeapExplorer::TagGlobalObjects() {
Object* obj_document;
if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
obj_document->IsJSObject()) {
+      // FIXME: Workaround: SharedWorker's current Isolate has a NULL context.
+      // As a result, GetProperty(*url_string) would crash.
+ if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
+ continue;
JSObject* document = JSObject::cast(obj_document);
Object* obj_url;
if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
@@ -3066,37 +3093,34 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
Heap::kMakeHeapIterableMask,
"HeapSnapshotGenerator::GenerateSnapshot");
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
Heap* debug_heap = Isolate::Current()->heap();
- ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
- ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
- ASSERT(!debug_heap->code_space()->was_swept_conservatively());
- ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
- ASSERT(!debug_heap->map_space()->was_swept_conservatively());
+ CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
+ CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
+ CHECK(!debug_heap->code_space()->was_swept_conservatively());
+ CHECK(!debug_heap->cell_space()->was_swept_conservatively());
+ CHECK(!debug_heap->map_space()->was_swept_conservatively());
#endif
// The following code uses heap iterators, so we want the heap to be
// stable. It should follow TagGlobalObjects as that can allocate.
AssertNoAllocation no_alloc;
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
SetProgressTotal(1); // 1 pass.
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
debug_heap->Verify();
#endif
if (!FillReferences()) return false;
- snapshot_->FillChildrenAndRetainers();
+ snapshot_->FillChildren();
snapshot_->RememberLastJSObjectId();
- if (!SetEntriesDominators()) return false;
- if (!CalculateRetainedSizes()) return false;
-
progress_counter_ = progress_total_;
if (!ProgressReport(true)) return false;
return true;
@@ -3138,187 +3162,6 @@ bool HeapSnapshotGenerator::FillReferences() {
}
-bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge* edge) {
- ASSERT(edge->from() == snapshot_->root());
- return edge->type() == HeapGraphEdge::kShortcut;
-}
-
-
-void HeapSnapshotGenerator::MarkUserReachableObjects() {
- List<HeapEntry*> worklist;
-
- Vector<HeapGraphEdge*> children = snapshot_->root()->children();
- for (int i = 0; i < children.length(); ++i) {
- if (IsUserGlobalReference(children[i])) {
- worklist.Add(children[i]->to());
- }
- }
-
- while (!worklist.is_empty()) {
- HeapEntry* entry = worklist.RemoveLast();
- if (entry->user_reachable()) continue;
- entry->set_user_reachable();
- Vector<HeapGraphEdge*> children = entry->children();
- for (int i = 0; i < children.length(); ++i) {
- HeapEntry* child = children[i]->to();
- if (!child->user_reachable()) {
- worklist.Add(child);
- }
- }
- }
-}
-
-
-static bool IsRetainingEdge(HeapGraphEdge* edge) {
- if (edge->type() == HeapGraphEdge::kShortcut) return false;
- // The edge is not retaining if it goes from system domain
- // (i.e. an object not reachable from window) to the user domain
- // (i.e. a reachable object).
- return edge->from()->user_reachable()
- || !edge->to()->user_reachable();
-}
-
-
-void HeapSnapshotGenerator::FillPostorderIndexes(
- Vector<HeapEntry*>* entries) {
- snapshot_->ClearPaint();
- int current_entry = 0;
- List<HeapEntry*> nodes_to_visit;
- HeapEntry* root = snapshot_->root();
- nodes_to_visit.Add(root);
- snapshot_->root()->paint();
- while (!nodes_to_visit.is_empty()) {
- HeapEntry* entry = nodes_to_visit.last();
- Vector<HeapGraphEdge*> children = entry->children();
- bool has_new_edges = false;
- for (int i = 0; i < children.length(); ++i) {
- if (entry != root && !IsRetainingEdge(children[i])) continue;
- HeapEntry* child = children[i]->to();
- if (!child->painted()) {
- nodes_to_visit.Add(child);
- child->paint();
- has_new_edges = true;
- }
- }
- if (!has_new_edges) {
- entry->set_postorder_index(current_entry);
- (*entries)[current_entry++] = entry;
- nodes_to_visit.RemoveLast();
- }
- }
- ASSERT_EQ(current_entry, entries->length());
-}
-
-
-static int Intersect(int i1, int i2, const Vector<int>& dominators) {
- int finger1 = i1, finger2 = i2;
- while (finger1 != finger2) {
- while (finger1 < finger2) finger1 = dominators[finger1];
- while (finger2 < finger1) finger2 = dominators[finger2];
- }
- return finger1;
-}
-
-
-// The algorithm is based on the article:
-// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
-// Softw. Pract. Exper. 4 (2001), pp. 1-10.
-bool HeapSnapshotGenerator::BuildDominatorTree(
- const Vector<HeapEntry*>& entries,
- Vector<int>* dominators) {
- if (entries.length() == 0) return true;
- HeapEntry* root = snapshot_->root();
- const int entries_length = entries.length(), root_index = entries_length - 1;
- for (int i = 0; i < root_index; ++i) (*dominators)[i] = HeapEntry::kNoEntry;
- (*dominators)[root_index] = root_index;
-
- // The affected array is used to mark entries which dominators
- // have to be racalculated because of changes in their retainers.
- ScopedVector<bool> affected(entries_length);
- for (int i = 0; i < affected.length(); ++i) affected[i] = false;
- // Mark the root direct children as affected.
- Vector<HeapGraphEdge*> children = entries[root_index]->children();
- for (int i = 0; i < children.length(); ++i) {
- affected[children[i]->to()->postorder_index()] = true;
- }
-
- bool changed = true;
- while (changed) {
- changed = false;
- if (!ProgressReport(false)) return false;
- for (int i = root_index - 1; i >= 0; --i) {
- if (!affected[i]) continue;
- affected[i] = false;
- // If dominator of the entry has already been set to root,
- // then it can't propagate any further.
- if ((*dominators)[i] == root_index) continue;
- int new_idom_index = HeapEntry::kNoEntry;
- Vector<HeapGraphEdge*> rets = entries[i]->retainers();
- for (int j = 0; j < rets.length(); ++j) {
- if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue;
- int ret_index = rets[j]->from()->postorder_index();
- if (dominators->at(ret_index) != HeapEntry::kNoEntry) {
- new_idom_index = new_idom_index == HeapEntry::kNoEntry
- ? ret_index
- : Intersect(ret_index, new_idom_index, *dominators);
- // If idom has already reached the root, it doesn't make sense
- // to check other retainers.
- if (new_idom_index == root_index) break;
- }
- }
- if (new_idom_index != HeapEntry::kNoEntry
- && dominators->at(i) != new_idom_index) {
- (*dominators)[i] = new_idom_index;
- changed = true;
- Vector<HeapGraphEdge*> children = entries[i]->children();
- for (int j = 0; j < children.length(); ++j) {
- affected[children[j]->to()->postorder_index()] = true;
- }
- }
- }
- }
- return true;
-}
-
-
-bool HeapSnapshotGenerator::SetEntriesDominators() {
- MarkUserReachableObjects();
- // This array is used for maintaining postorder of nodes.
- ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries().length());
- FillPostorderIndexes(&ordered_entries);
- ScopedVector<int> dominators(ordered_entries.length());
- if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
- for (int i = 0; i < ordered_entries.length(); ++i) {
- ASSERT(dominators[i] != HeapEntry::kNoEntry);
- ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
- }
- return true;
-}
-
-
-bool HeapSnapshotGenerator::CalculateRetainedSizes() {
- // As for the dominators tree we only know parent nodes, not
- // children, to sum up total sizes we "bubble" node's self size
- // adding it to all of its parents.
- List<HeapEntry>& entries = snapshot_->entries();
- for (int i = 0; i < entries.length(); ++i) {
- HeapEntry* entry = &entries[i];
- entry->set_retained_size(entry->self_size());
- }
- for (int i = 0; i < entries.length(); ++i) {
- int entry_size = entries[i].self_size();
- HeapEntry* current = &entries[i];
- for (HeapEntry* dominator = current->dominator();
- dominator != current;
- current = dominator, dominator = current->dominator()) {
- ASSERT(current->dominator() != NULL);
- dominator->add_retained_size(entry_size);
- }
- }
- return true;
-}
-
-
template<int bytes> struct MaxDecimalDigitsIn;
template<> struct MaxDecimalDigitsIn<4> {
static const int kSigned = 11;
@@ -3417,8 +3260,8 @@ class OutputStreamWriter {
// type, name|index, to_node.
const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, retained_size, dominator, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 7;
+// type, name, id, self_size, children_index.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
ASSERT(writer_ == NULL);
@@ -3458,14 +3301,12 @@ HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
(snapshot_->RawSnapshotSize() + MB - 1) / MB);
HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildrenAndRetainers();
- result->SetDominatorsToSelf();
+ result->FillChildren();
return result;
}
void HeapSnapshotJSONSerializer::SerializeImpl() {
- List<HeapEntry>& nodes = snapshot_->entries();
ASSERT(0 == snapshot_->root()->index());
writer_->AddCharacter('{');
writer_->AddString("\"snapshot\":{");
@@ -3473,11 +3314,11 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
if (writer_->aborted()) return;
writer_->AddString("},\n");
writer_->AddString("\"nodes\":[");
- SerializeNodes(nodes);
+ SerializeNodes();
if (writer_->aborted()) return;
writer_->AddString("],\n");
writer_->AddString("\"edges\":[");
- SerializeEdges(nodes);
+ SerializeEdges();
if (writer_->aborted()) return;
writer_->AddString("],\n");
writer_->AddString("\"strings\":[");
@@ -3519,9 +3360,9 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
- // The buffer needs space for 3 ints, 3 commas and \0
+ // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1; // NOLINT
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
EmbeddedVector<char, kBufferSize> buffer;
int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
|| edge->type() == HeapGraphEdge::kHidden
@@ -3536,32 +3377,28 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
-void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
- bool first_edge = true;
- for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
- Vector<HeapGraphEdge*> children = entry->children();
- for (int j = 0; j < children.length(); ++j) {
- SerializeEdge(children[j], first_edge);
- first_edge = false;
- if (writer_->aborted()) return;
- }
+void HeapSnapshotJSONSerializer::SerializeEdges() {
+ List<HeapGraphEdge*>& edges = snapshot_->children();
+ for (int i = 0; i < edges.length(); ++i) {
+ ASSERT(i == 0 ||
+ edges[i - 1]->from()->index() <= edges[i]->from()->index());
+ SerializeEdge(edges[i], i == 0);
+ if (writer_->aborted()) return;
}
}
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry,
- int edges_index) {
- // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+ // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
static const int kBufferSize =
- 6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned // NOLINT
- + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned // NOLINT
- + 7 + 1 + 1;
+ 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 5 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
if (entry_index(entry) != 0) {
@@ -3575,23 +3412,17 @@ void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry,
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->retained_size(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(entry->dominator()), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edges_index, buffer, buffer_pos);
+ buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
}
-void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) {
- int edges_index = 0;
- for (int i = 0; i < nodes.length(); ++i) {
- HeapEntry* entry = &nodes[i];
- SerializeNode(entry, edges_index);
- edges_index += entry->children().length() * kEdgeFieldsCount;
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+ List<HeapEntry>& entries = snapshot_->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ SerializeNode(&entries[i]);
if (writer_->aborted()) return;
}
}
@@ -3615,9 +3446,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
- JSON_S("retained_size") ","
- JSON_S("dominator") ","
- JSON_S("edges_index")) ","
+ JSON_S("edge_count")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
JSON_S("hidden") ","
diff --git a/src/3rdparty/v8/src/profile-generator.h b/src/3rdparty/v8/src/profile-generator.h
index 92896c2..04f4a1c 100644
--- a/src/3rdparty/v8/src/profile-generator.h
+++ b/src/3rdparty/v8/src/profile-generator.h
@@ -72,6 +72,7 @@ class StringsStorage {
const char* GetName(int index);
inline const char* GetFunctionName(String* name);
inline const char* GetFunctionName(const char* name);
+ size_t GetUsedMemorySize() const;
private:
static const int kMaxNameSize = 1024;
@@ -529,35 +530,14 @@ class HeapEntry BASE_EMBEDDED {
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
int self_size() { return self_size_; }
- int retained_size() { return retained_size_; }
- void add_retained_size(int size) { retained_size_ += size; }
- void set_retained_size(int size) { retained_size_ = size; }
INLINE(int index() const);
- int postorder_index() { return postorder_index_; }
- void set_postorder_index(int value) { postorder_index_ = value; }
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
- INLINE(int set_retainers_index(int index));
void add_child(HeapGraphEdge* edge) {
children_arr()[children_count_++] = edge;
}
- void add_retainer(HeapGraphEdge* edge) {
- retainers_arr()[retainers_count_++] = edge;
- }
Vector<HeapGraphEdge*> children() {
return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
- Vector<HeapGraphEdge*> retainers() {
- return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
- INLINE(HeapEntry* dominator() const);
- void set_dominator(HeapEntry* entry) {
- ASSERT(entry != NULL);
- dominator_ = entry->index();
- }
- void clear_paint() { painted_ = false; }
- bool painted() { return painted_; }
- void paint() { painted_ = true; }
- bool user_reachable() { return user_reachable_; }
- void set_user_reachable() { user_reachable_ = true; }
void SetIndexedReference(
HeapGraphEdge::Type type, int index, HeapEntry* entry);
@@ -571,22 +551,12 @@ class HeapEntry BASE_EMBEDDED {
private:
INLINE(HeapGraphEdge** children_arr());
- INLINE(HeapGraphEdge** retainers_arr());
const char* TypeAsString();
- unsigned painted_: 1;
- unsigned user_reachable_: 1;
- int dominator_: 30;
unsigned type_: 4;
- int retainers_count_: 28;
- int retainers_index_;
- int children_count_;
+ int children_count_: 28;
int children_index_;
int self_size_;
- union {
- int postorder_index_; // Used during dominator tree building.
- int retained_size_; // At that moment, there is no retained size yet.
- };
SnapshotObjectId id_;
HeapSnapshot* snapshot_;
const char* name_;
@@ -626,7 +596,6 @@ class HeapSnapshot {
List<HeapEntry>& entries() { return entries_; }
List<HeapGraphEdge>& edges() { return edges_; }
List<HeapGraphEdge*>& children() { return children_; }
- List<HeapGraphEdge*>& retainers() { return retainers_; }
void RememberLastJSObjectId();
SnapshotObjectId max_snapshot_js_object_id() const {
return max_snapshot_js_object_id_;
@@ -640,11 +609,9 @@ class HeapSnapshot {
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
HeapEntry* AddNativesRootEntry();
- void ClearPaint();
HeapEntry* GetEntryById(SnapshotObjectId id);
List<HeapEntry*>* GetSortedEntriesList();
- void SetDominatorsToSelf();
- void FillChildrenAndRetainers();
+ void FillChildren();
void Print(int max_depth);
void PrintEntriesSize();
@@ -661,7 +628,6 @@ class HeapSnapshot {
List<HeapEntry> entries_;
List<HeapGraphEdge> edges_;
List<HeapGraphEdge*> children_;
- List<HeapGraphEdge*> retainers_;
List<HeapEntry*> sorted_entries_;
SnapshotObjectId max_snapshot_js_object_id_;
@@ -684,7 +650,8 @@ class HeapObjectsMap {
}
void StopHeapObjectsTracking();
- void PushHeapObjectsStats(OutputStream* stream);
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ size_t GetUsedMemorySize() const;
static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
@@ -742,7 +709,7 @@ class HeapSnapshotsCollection {
~HeapSnapshotsCollection();
bool is_tracking_objects() { return is_tracking_objects_; }
- void PushHeapObjectsStats(OutputStream* stream) {
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
return ids_.PushHeapObjectsStats(stream);
}
void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
@@ -769,6 +736,7 @@ class HeapSnapshotsCollection {
SnapshotObjectId last_assigned_id() const {
return ids_.last_assigned_id();
}
+ size_t GetUsedMemorySize() const;
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
@@ -957,10 +925,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child,
const char* name_format_string = NULL,
int field_offset = -1);
- void SetPropertyShortcutReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child);
void SetUserGlobalReference(Object* user_global);
void SetRootGcRootsReference();
void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
@@ -1061,16 +1025,9 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
bool GenerateSnapshot();
private:
- bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
- Vector<int>* dominators);
- bool CalculateRetainedSizes();
bool FillReferences();
- void FillPostorderIndexes(Vector<HeapEntry*>* entries);
- bool IsUserGlobalReference(const HeapGraphEdge* edge);
- void MarkUserReachableObjects();
void ProgressStep();
bool ProgressReport(bool force = false);
- bool SetEntriesDominators();
void SetProgressTotal(int iterations_count);
HeapSnapshot* snapshot_;
@@ -1114,10 +1071,10 @@ class HeapSnapshotJSONSerializer {
int GetStringId(const char* s);
int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges(const List<HeapEntry>& nodes);
+ void SerializeEdges();
void SerializeImpl();
- void SerializeNode(HeapEntry* entry, int edges_index);
- void SerializeNodes(const List<HeapEntry>& nodes);
+ void SerializeNode(HeapEntry* entry);
+ void SerializeNodes();
void SerializeSnapshot();
void SerializeString(const unsigned char* s);
void SerializeStrings();
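
The kExpectedHeapEntrySize constants (24 on 32-bit, 32 on 64-bit) follow directly from the slimmed field list in HeapEntry. A layout sketch that reproduces the arithmetic on common ABIs (field names mirror the header; SnapshotObjectId is assumed to be a 32-bit id):

    #include <cstdint>
    #include <cstdio>

    struct HeapEntrySketch {
      unsigned type_ : 4;
      int children_count_ : 28;  // Shares one 32-bit word with type_.
      int children_index_;
      int self_size_;
      uint32_t id_;
      void* snapshot_;
      const char* name_;
    };

    int main() {
      // Four 32-bit words plus two pointers: 24 bytes on ILP32, 32 on LP64.
      printf("%u\n", static_cast<unsigned>(sizeof(HeapEntrySketch)));
      return 0;
    }
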
diff --git a/src/3rdparty/v8/src/property-details.h b/src/3rdparty/v8/src/property-details.h
index c79aa96..64e3205 100644
--- a/src/3rdparty/v8/src/property-details.h
+++ b/src/3rdparty/v8/src/property-details.h
@@ -55,21 +55,18 @@ class Smi;
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
+ // Only in slow mode.
+ NORMAL = 0,
+ // Only in fast mode.
+ FIELD = 1,
+ CONSTANT_FUNCTION = 2,
CALLBACKS = 3,
- HANDLER = 4, // only in lookup results, not in descriptors
- INTERCEPTOR = 5, // only in lookup results, not in descriptors
- // All properties before MAP_TRANSITION are real.
- MAP_TRANSITION = 6, // only in fast mode
- ELEMENTS_TRANSITION = 7,
- CONSTANT_TRANSITION = 8, // only in fast mode
- NULL_DESCRIPTOR = 9, // only in fast mode
- // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
- // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
- // nonexistent properties.
- NONEXISTENT = NULL_DESCRIPTOR
+ // Only in lookup results, not in descriptors.
+ HANDLER = 4,
+ INTERCEPTOR = 5,
+ TRANSITION = 6,
+ // Only used as a marker in LookupResult.
+ NONEXISTENT = 7
};
@@ -80,50 +77,64 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
int index = 0) {
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(index));
-
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
- | StorageField::encode(index);
+ | DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->index());
+ ASSERT(index == this->dictionary_index());
}
+ int pointer() { return DescriptorPointer::decode(value_); }
+
+ PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
PropertyType type() { return TypeField::decode(value_); }
- PropertyAttributes attributes() { return AttributesField::decode(value_); }
+ PropertyAttributes attributes() const {
+ return AttributesField::decode(value_);
+ }
- int index() { return StorageField::decode(value_); }
+ int dictionary_index() {
+ return DictionaryStorageField::decode(value_);
+ }
+
+ int descriptor_index() {
+ return DescriptorStorageField::decode(value_);
+ }
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
- return StorageField::is_valid(index);
+ return DictionaryStorageField::is_valid(index);
}
- bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
- bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+ bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
+ bool IsDontDelete() const { return (attributes() & DONT_DELETE) != 0; }
+ bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
+ bool IsDeleted() const { return DeletedField::decode(value_) != 0;}
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 4> {};
- class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
- class DeletedField: public BitField<uint32_t, 7, 1> {};
- class StorageField: public BitField<uint32_t, 8, 32-8> {};
+ class TypeField: public BitField<PropertyType, 0, 3> {};
+ class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
+ class DeletedField: public BitField<uint32_t, 6, 1> {};
+ class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
+ class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
+ class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
static const int kInitialIndex = 1;
private:
+ PropertyDetails(int value, int pointer) {
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+
uint32_t value_;
};
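
The reshuffled PropertyDetails layout packs the type (3 bits), attributes (3), and deleted flag (1) into the low bits, then overlays two 11-bit fields in the space the 24-bit dictionary index uses in slow mode: the descriptor number at bit 7 and the sorted-key pointer at bit 18. A minimal BitField analogue demonstrating the encode/decode round trip (Field is a stand-in for V8's BitField template):

    #include <cstdint>
    #include <cstdio>

    template <int shift, int size>
    struct Field {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(uint32_t v) { return v << shift; }
      static uint32_t update(uint32_t x, uint32_t v) {
        return (x & ~kMask) | encode(v);
      }
      static uint32_t decode(uint32_t x) { return (x & kMask) >> shift; }
    };

    typedef Field<0, 3> TypeF;                 // PropertyType.
    typedef Field<7, 11> DescriptorStorageF;   // descriptor_index().
    typedef Field<18, 11> DescriptorPointerF;  // pointer()/set_pointer().

    int main() {
      uint32_t v = TypeF::encode(1) | DescriptorStorageF::encode(42);
      v = DescriptorPointerF::update(v, 7);  // Like set_pointer(7).
      printf("%u %u %u\n", TypeF::decode(v), DescriptorStorageF::decode(v),
             DescriptorPointerF::decode(v));  // Prints "1 42 7".
      return 0;
    }
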
diff --git a/src/3rdparty/v8/src/property.cc b/src/3rdparty/v8/src/property.cc
index 78f237d..d05ef2b 100644
--- a/src/3rdparty/v8/src/property.cc
+++ b/src/3rdparty/v8/src/property.cc
@@ -43,99 +43,80 @@ void LookupResult::Iterate(ObjectVisitor* visitor) {
#ifdef OBJECT_PRINT
void LookupResult::Print(FILE* out) {
if (!IsFound()) {
- PrintF(out, "Not Found\n");
+ FPrintF(out, "Not Found\n");
return;
}
- PrintF(out, "LookupResult:\n");
- PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
- PrintF(out, " -attributes = %x\n", GetAttributes());
+ FPrintF(out, "LookupResult:\n");
+ FPrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
+ FPrintF(out, " -attributes = %x\n", GetAttributes());
switch (type()) {
case NORMAL:
- PrintF(out, " -type = normal\n");
- PrintF(out, " -entry = %d", GetDictionaryEntry());
- break;
- case MAP_TRANSITION:
- PrintF(out, " -type = map transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
- break;
- case ELEMENTS_TRANSITION:
- PrintF(out, " -type = elements transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, " -type = normal\n");
+ FPrintF(out, " -entry = %d", GetDictionaryEntry());
break;
case CONSTANT_FUNCTION:
- PrintF(out, " -type = constant function\n");
- PrintF(out, " -function:\n");
+ FPrintF(out, " -type = constant function\n");
+ FPrintF(out, " -function:\n");
GetConstantFunction()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, "\n");
break;
case FIELD:
- PrintF(out, " -type = field\n");
- PrintF(out, " -index = %d", GetFieldIndex());
- PrintF(out, "\n");
+ FPrintF(out, " -type = field\n");
+ FPrintF(out, " -index = %d", GetFieldIndex());
+ FPrintF(out, "\n");
break;
case CALLBACKS:
- PrintF(out, " -type = call backs\n");
- PrintF(out, " -callback object:\n");
+ FPrintF(out, " -type = call backs\n");
+ FPrintF(out, " -callback object:\n");
GetCallbackObject()->Print(out);
break;
case HANDLER:
- PrintF(out, " -type = lookup proxy\n");
+ FPrintF(out, " -type = lookup proxy\n");
break;
case INTERCEPTOR:
- PrintF(out, " -type = lookup interceptor\n");
- break;
- case CONSTANT_TRANSITION:
- PrintF(out, " -type = constant property transition\n");
- PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
- PrintF(out, "\n");
+ FPrintF(out, " -type = lookup interceptor\n");
break;
- case NULL_DESCRIPTOR:
- PrintF(out, " =type = null descriptor\n");
+ case TRANSITION:
+ switch (GetTransitionDetails().type()) {
+ case FIELD:
+ FPrintF(out, " -type = map transition\n");
+ FPrintF(out, " -map:\n");
+ GetTransitionMap()->Print(out);
+ FPrintF(out, "\n");
+ return;
+ case CONSTANT_FUNCTION:
+ FPrintF(out, " -type = constant property transition\n");
+ FPrintF(out, " -map:\n");
+ GetTransitionMap()->Print(out);
+ FPrintF(out, "\n");
+ return;
+ case CALLBACKS:
+ FPrintF(out, " -type = callbacks transition\n");
+ FPrintF(out, " -callback object:\n");
+ GetCallbackObject()->Print(out);
+ return;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ case NONEXISTENT:
+ UNREACHABLE();
break;
}
}
void Descriptor::Print(FILE* out) {
- PrintF(out, "Descriptor ");
+ FPrintF(out, "Descriptor ");
GetKey()->ShortPrint(out);
- PrintF(out, " @ ");
+ FPrintF(out, " @ ");
GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().index());
+ FPrintF(out, " %d\n", GetDetails().descriptor_index());
}
#endif
-bool Descriptor::ContainsTransition() {
- switch (details_.type()) {
- case MAP_TRANSITION:
- case CONSTANT_TRANSITION:
- case ELEMENTS_TRANSITION:
- return true;
- case CALLBACKS: {
- if (!value_->IsAccessorPair()) return false;
- AccessorPair* accessors = AccessorPair::cast(value_);
- return accessors->getter()->IsMap() || accessors->setter()->IsMap();
- }
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // Keep the compiler happy.
- return false;
-}
-
-
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/property.h b/src/3rdparty/v8/src/property.h
index ba5e3c8..3faa28b 100644
--- a/src/3rdparty/v8/src/property.h
+++ b/src/3rdparty/v8/src/property.h
@@ -29,6 +29,7 @@
#define V8_PROPERTY_H_
#include "allocation.h"
+#include "transitions.h"
namespace v8 {
namespace internal {
@@ -64,11 +65,10 @@ class Descriptor BASE_EMBEDDED {
#endif
void SetEnumerationIndex(int index) {
- ASSERT(PropertyDetails::IsValidIndex(index));
details_ = PropertyDetails(details_.attributes(), details_.type(), index);
}
- bool ContainsTransition();
+ void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
private:
String* key_;
@@ -93,7 +93,7 @@ class Descriptor BASE_EMBEDDED {
Object* value,
PropertyAttributes attributes,
PropertyType type,
- int index = 0)
+ int index)
: key_(key),
value_(value),
details_(attributes, type, index) { }
@@ -101,35 +101,6 @@ class Descriptor BASE_EMBEDDED {
friend class DescriptorArray;
};
-// A pointer from a map to the new map that is created by adding
-// a named property. These are key to the speed and functioning of V8.
-// The two maps should always have the same prototype, since
-// MapSpace::CreateBackPointers depends on this.
-class MapTransitionDescriptor: public Descriptor {
- public:
- MapTransitionDescriptor(String* key, Map* map, PropertyAttributes attributes)
- : Descriptor(key, map, attributes, MAP_TRANSITION) { }
-};
-
-class ElementsTransitionDescriptor: public Descriptor {
- public:
- ElementsTransitionDescriptor(String* key,
- Object* map_or_array)
- : Descriptor(key, map_or_array, PropertyDetails(NONE,
- ELEMENTS_TRANSITION)) { }
-};
-
-// Marks a field name in a map so that adding the field is guaranteed
-// to create a FIELD descriptor in the new map. Used after adding
-// a constant function the first time, creating a CONSTANT_FUNCTION
-// descriptor in the new map. This avoids creating multiple maps with
-// the same CONSTANT_FUNCTION field.
-class ConstTransitionDescriptor: public Descriptor {
- public:
- explicit ConstTransitionDescriptor(String* key, Map* map)
- : Descriptor(key, map, NONE, CONSTANT_TRANSITION) { }
-};
-
class FieldDescriptor: public Descriptor {
public:
@@ -146,7 +117,7 @@ class ConstantFunctionDescriptor: public Descriptor {
ConstantFunctionDescriptor(String* key,
JSFunction* function,
PropertyAttributes attributes,
- int index = 0)
+ int index)
: Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
};
@@ -161,35 +132,6 @@ class CallbacksDescriptor: public Descriptor {
};
-template <class T>
-bool IsPropertyDescriptor(T* desc) {
- switch (desc->type()) {
- case NORMAL:
- case FIELD:
- case CONSTANT_FUNCTION:
- case HANDLER:
- case INTERCEPTOR:
- return true;
- case CALLBACKS: {
- Object* callback_object = desc->GetCallbackObject();
- // Non-JavaScript (i.e. native) accessors are always a property, otherwise
- // either the getter or the setter must be an accessor. Put another way:
- // If we only see map transitions and holes in a pair, this is not a
- // property.
- return (!callback_object->IsAccessorPair() ||
- AccessorPair::cast(callback_object)->ContainsAccessor());
- }
- case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
- return false;
- }
- UNREACHABLE(); // keep the compiler happy
- return false;
-}
-
-
class LookupResult BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
@@ -198,7 +140,7 @@ class LookupResult BASE_EMBEDDED {
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NORMAL) {
+ details_(NONE, NONEXISTENT) {
isolate->SetTopLookupResult(this);
}
@@ -214,6 +156,13 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
+ void TransitionResult(JSObject* holder, int number) {
+ lookup_type_ = TRANSITION_TYPE;
+ details_ = PropertyDetails(NONE, TRANSITION);
+ holder_ = holder;
+ number_ = number;
+ }
+
void ConstantResult(JSObject* holder) {
lookup_type_ = CONSTANT_TYPE;
holder_ = holder;
@@ -246,6 +195,7 @@ class LookupResult BASE_EMBEDDED {
void NotFound() {
lookup_type_ = NOT_FOUND;
+ details_ = PropertyDetails(NONE, NONEXISTENT);
holder_ = NULL;
}
@@ -265,24 +215,61 @@ class LookupResult BASE_EMBEDDED {
}
PropertyAttributes GetAttributes() {
+ ASSERT(!IsTransition());
ASSERT(IsFound());
+ ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
PropertyDetails GetPropertyDetails() {
+ ASSERT(!IsTransition());
return details_;
}
- bool IsReadOnly() { return details_.IsReadOnly(); }
+ bool IsFastPropertyType() {
+ ASSERT(IsFound());
+ return IsTransition() || type() != NORMAL;
+ }
+
+ // Property callbacks do not include transitions to callbacks.
+ bool IsPropertyCallbacks() {
+ ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
+ return details_.type() == CALLBACKS;
+ }
+
+ bool IsReadOnly() {
+ ASSERT(IsFound());
+ ASSERT(!IsTransition());
+ ASSERT(details_.type() != NONEXISTENT);
+ return details_.IsReadOnly();
+ }
+
+ bool IsField() {
+ ASSERT(!(details_.type() == FIELD && !IsFound()));
+ return details_.type() == FIELD;
+ }
+
+ bool IsNormal() {
+ ASSERT(!(details_.type() == NORMAL && !IsFound()));
+ return details_.type() == NORMAL;
+ }
+
+ bool IsConstantFunction() {
+ ASSERT(!(details_.type() == CONSTANT_FUNCTION && !IsFound()));
+ return details_.type() == CONSTANT_FUNCTION;
+ }
+
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
bool IsDeleted() { return details_.IsDeleted(); }
bool IsFound() { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result a property, excluding transitions and the null descriptor?
bool IsProperty() {
- return IsFound() && IsPropertyDescriptor(this);
+ return IsFound() && !IsTransition();
}
bool IsCacheable() { return cacheable_; }
@@ -303,36 +290,59 @@ class LookupResult BASE_EMBEDDED {
case CONSTANT_FUNCTION:
return GetConstantFunction();
default:
- return Smi::FromInt(0);
+ return Isolate::Current()->heap()->the_hole_value();
}
}
+ Map* GetTransitionTarget() {
+ ASSERT(IsTransition());
+ TransitionArray* transitions = holder()->map()->transitions();
+ return transitions->GetTarget(number_);
+ }
+
+ PropertyDetails GetTransitionDetails(Map* map) {
+ ASSERT(IsTransition());
+ TransitionArray* transitions = map->transitions();
+ return transitions->GetTargetDetails(number_);
+ }
+
+ PropertyDetails GetTransitionDetails() {
+ return GetTransitionDetails(holder()->map());
+ }
+
+ bool IsTransitionToField(Map* map) {
+ return IsTransition() && GetTransitionDetails(map).type() == FIELD;
+ }
Map* GetTransitionMap() {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION ||
- type() == ELEMENTS_TRANSITION ||
- type() == CONSTANT_TRANSITION);
+ ASSERT(IsTransition());
return Map::cast(GetValue());
}
Map* GetTransitionMapFromMap(Map* map) {
+ ASSERT(IsTransition());
+ return map->transitions()->GetTarget(number_);
+ }
+
+ int GetTransitionIndex() {
+ ASSERT(IsTransition());
+ return number_;
+ }
+
+ int GetDescriptorIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION);
- return Map::cast(map->instance_descriptors()->GetValue(number_));
+ return number_;
}
int GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
+ ASSERT(IsField());
return Descriptor::IndexFromValue(GetValue());
}
int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == FIELD);
- return Descriptor::IndexFromValue(
- map->instance_descriptors()->GetValue(number_)) -
+ ASSERT(IsField());
+ return Descriptor::IndexFromValue(GetValueFromMap(map)) -
map->inobject_properties();
}
@@ -347,16 +357,15 @@ class LookupResult BASE_EMBEDDED {
}
JSFunction* GetConstantFunctionFromMap(Map* map) {
- ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(type() == CONSTANT_FUNCTION);
- return JSFunction::cast(map->instance_descriptors()->GetValue(number_));
+ return JSFunction::cast(GetValueFromMap(map));
}
Object* GetCallbackObject() {
if (lookup_type_ == CONSTANT_TYPE) {
- // For now we only have the __proto__ as constant type.
return HEAP->prototype_accessors();
}
+ ASSERT(!IsTransition());
return GetValue();
}
@@ -366,14 +375,19 @@ class LookupResult BASE_EMBEDDED {
Object* GetValue() {
if (lookup_type_ == DESCRIPTOR_TYPE) {
- DescriptorArray* descriptors = holder()->map()->instance_descriptors();
- return descriptors->GetValue(number_);
+ return GetValueFromMap(holder()->map());
}
// In the dictionary case, the data is held in the value field.
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return holder()->GetNormalizedProperty(this);
}
+ Object* GetValueFromMap(Map* map) const {
+ ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+ ASSERT(number_ < map->NumberOfOwnDescriptors());
+ return map->instance_descriptors()->GetValue(number_);
+ }
+
void Iterate(ObjectVisitor* visitor);
private:
@@ -384,6 +398,7 @@ class LookupResult BASE_EMBEDDED {
enum {
NOT_FOUND,
DESCRIPTOR_TYPE,
+ TRANSITION_TYPE,
DICTIONARY_TYPE,
HANDLER_TYPE,
INTERCEPTOR_TYPE,
diff --git a/src/3rdparty/v8/src/proxy.js b/src/3rdparty/v8/src/proxy.js
index 4e86c88..53a3572 100644
--- a/src/3rdparty/v8/src/proxy.js
+++ b/src/3rdparty/v8/src/proxy.js
@@ -31,7 +31,7 @@ global.Proxy = new $Object();
var $Proxy = global.Proxy
-$Proxy.create = function(handler, proto) {
+function ProxyCreate(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (IS_UNDEFINED(proto))
@@ -41,7 +41,7 @@ $Proxy.create = function(handler, proto) {
return %CreateJSProxy(handler, proto)
}
-$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+function ProxyCreateFunction(handler, callTrap, constructTrap) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (!IS_SPEC_FUNCTION(callTrap))
@@ -62,6 +62,11 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
handler, callTrap, constructTrap, $Function.prototype)
}
+%CheckIsBootstrapping()
+InstallFunctions($Proxy, DONT_ENUM, [
+ "create", ProxyCreate,
+ "createFunction", ProxyCreateFunction
+])
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
index aa67919..16766ca 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.cc
@@ -38,8 +38,10 @@ namespace internal {
#ifdef V8_INTERPRETED_REGEXP
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
- : buffer_(buffer),
+RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
+ Zone* zone)
+ : RegExpMacroAssembler(zone),
+ buffer_(buffer),
pc_(0),
own_buffer_(false),
advance_current_end_(kInvalidPC) {
@@ -203,8 +205,9 @@ void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
}
-void RegExpMacroAssemblerIrregexp::Succeed() {
+bool RegExpMacroAssemblerIrregexp::Succeed() {
Emit(BC_SUCCEED, 0);
+ return false; // Restart matching for global regexp not supported.
}
@@ -407,17 +410,6 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerIrregexp::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- ASSERT(reg1 >= 0);
- ASSERT(reg1 <= kMaxRegister);
- Emit(BC_CHECK_NOT_REGS_EQUAL, reg1);
- Emit32(reg2);
- EmitOrLink(on_not_equal);
-}
-
-
void RegExpMacroAssemblerIrregexp::CheckCharacters(
Vector<const uc16> str,
int cp_offset,
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
index 25cb68d..4bc2980 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
+++ b/src/3rdparty/v8/src/regexp-macro-assembler-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -48,7 +48,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- explicit RegExpMacroAssemblerIrregexp(Vector<byte>);
+ RegExpMacroAssemblerIrregexp(Vector<byte>, Zone* zone);
virtual ~RegExpMacroAssemblerIrregexp();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
@@ -59,7 +59,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void Backtrack();
virtual void GoTo(Label* label);
virtual void PushBacktrack(Label* label);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void Fail();
virtual void PopRegister(int register_index);
virtual void PushRegister(int register_index,
@@ -103,7 +103,6 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
index b7aeac4..f878e8c 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
+++ b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,6 +35,7 @@ namespace internal {
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
+ RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
ASSERT(type < 5);
@@ -102,14 +103,15 @@ void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
}
-void RegExpMacroAssemblerTracer::Succeed() {
- PrintF(" Succeed();\n");
- assembler_->Succeed();
+bool RegExpMacroAssemblerTracer::Succeed() {
+ bool restart = assembler_->Succeed();
+ PrintF(" Succeed();%s\n", restart ? " [restart for global match]" : "");
+ return restart;
}
void RegExpMacroAssemblerTracer::Fail() {
- PrintF(" Fail();\n");
+ PrintF(" Fail();");
assembler_->Fail();
}
@@ -381,17 +383,6 @@ void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
}
-void RegExpMacroAssemblerTracer::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
- reg1,
- reg2,
- LabelToInt(on_not_equal));
- assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
-}
-
-
void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
index 3fd4d8b..ac262df 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
+++ b/src/3rdparty/v8/src/regexp-macro-assembler-tracer.h
@@ -59,7 +59,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned and_with,
@@ -98,7 +97,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.cc b/src/3rdparty/v8/src/regexp-macro-assembler.cc
index b6fb3c5..82ba34d 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler.cc
+++ b/src/3rdparty/v8/src/regexp-macro-assembler.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,7 +35,10 @@
namespace v8 {
namespace internal {
-RegExpMacroAssembler::RegExpMacroAssembler() : slow_safe_compiler_(false) {
+RegExpMacroAssembler::RegExpMacroAssembler(Zone* zone)
+ : slow_safe_compiler_(false),
+ global_mode_(NOT_GLOBAL),
+ zone_(zone) {
}
@@ -54,8 +57,8 @@ bool RegExpMacroAssembler::CanReadUnaligned() {
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
-NativeRegExpMacroAssembler::NativeRegExpMacroAssembler()
- : RegExpMacroAssembler() {
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
+ : RegExpMacroAssembler(zone) {
}
@@ -64,11 +67,7 @@ NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_TARGET_CAN_READ_UNALIGNED
- return !slow_safe();
-#else
- return false;
-#endif
+ return FLAG_enable_unaligned_accesses && !slow_safe();
}
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
@@ -149,6 +148,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
input_start,
input_end,
offsets_vector,
+ offsets_vector_length,
isolate);
return res;
}
@@ -161,6 +161,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
const byte* input_start,
const byte* input_end,
int* output,
+ int output_size,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
@@ -174,10 +175,10 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_start,
input_end,
output,
+ output_size,
stack_base,
direct_call,
isolate);
- ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.h b/src/3rdparty/v8/src/regexp-macro-assembler.h
index 8587435..bcf3673 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler.h
+++ b/src/3rdparty/v8/src/regexp-macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -63,7 +63,7 @@ class RegExpMacroAssembler {
kCheckStackLimit = true
};
- RegExpMacroAssembler();
+ explicit RegExpMacroAssembler(Zone* zone);
virtual ~RegExpMacroAssembler();
// The maximal number of pushes between stack checks. Users must supply
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
@@ -128,10 +128,6 @@ class RegExpMacroAssembler {
// array, and if the found byte is non-zero, we jump to the on_bit_set label.
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) = 0;
- virtual void CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) = 0;
-
// Checks whether the given offset from the current position is before
// the end of the string. May overwrite the current character.
virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
@@ -174,7 +170,8 @@ class RegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg) = 0;
virtual void SetCurrentPositionFromEnd(int by) = 0;
virtual void SetRegister(int register_index, int to) = 0;
- virtual void Succeed() = 0;
+ // Return whether the matching (with a global regexp) will be restarted.
+ virtual bool Succeed() = 0;
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
@@ -183,8 +180,21 @@ class RegExpMacroAssembler {
void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
bool slow_safe() { return slow_safe_compiler_; }
+ enum GlobalMode { NOT_GLOBAL, GLOBAL, GLOBAL_NO_ZERO_LENGTH_CHECK };
+ // Set whether the regular expression has the global flag. Exiting due to
+ // a failure in a global regexp may still mean success overall.
+ inline void set_global_mode(GlobalMode mode) { global_mode_ = mode; }
+ inline bool global() { return global_mode_ != NOT_GLOBAL; }
+ inline bool global_with_zero_length_check() {
+ return global_mode_ == GLOBAL;
+ }
+
+ Zone* zone() const { return zone_; }
+
private:
bool slow_safe_compiler_;
+ GlobalMode global_mode_;
+ Zone* zone_;
};
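
Succeed() now reports whether matching should restart, and the three-way GlobalMode records both that the regexp is global and whether the zero-length-match check can be elided. A minimal sketch of the two predicates (plain C++, nothing V8-specific); note the member has to hold all three states, since a two-valued type would collapse GLOBAL_NO_ZERO_LENGTH_CHECK into GLOBAL:

  #include <cassert>

  enum GlobalMode { NOT_GLOBAL, GLOBAL, GLOBAL_NO_ZERO_LENGTH_CHECK };

  // Toy holder for the mode, mirroring the accessors above.
  struct ModeSketch {
    GlobalMode global_mode_;
    bool global() const { return global_mode_ != NOT_GLOBAL; }
    bool global_with_zero_length_check() const {
      return global_mode_ == GLOBAL;
    }
  };

  int main() {
    ModeSketch m = { GLOBAL_NO_ZERO_LENGTH_CHECK };
    assert(m.global());                          // still a global regexp...
    assert(!m.global_with_zero_length_check());  // ...but the check is elided
    return 0;
  }
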
@@ -206,7 +216,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// capture positions.
enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
- NativeRegExpMacroAssembler();
+ explicit NativeRegExpMacroAssembler(Zone* zone);
virtual ~NativeRegExpMacroAssembler();
virtual bool CanReadUnaligned();
@@ -249,6 +259,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
const byte* input_start,
const byte* input_end,
int* output,
+ int output_size,
Isolate* isolate);
};
diff --git a/src/3rdparty/v8/src/regexp-stack.cc b/src/3rdparty/v8/src/regexp-stack.cc
index ff9547f..325a149 100644
--- a/src/3rdparty/v8/src/regexp-stack.cc
+++ b/src/3rdparty/v8/src/regexp-stack.cc
@@ -51,6 +51,7 @@ RegExpStack::RegExpStack()
RegExpStack::~RegExpStack() {
+ thread_local_.Free();
}
diff --git a/src/3rdparty/v8/src/regexp.js b/src/3rdparty/v8/src/regexp.js
index a574f62..a3675f0 100644
--- a/src/3rdparty/v8/src/regexp.js
+++ b/src/3rdparty/v8/src/regexp.js
@@ -140,18 +140,15 @@ function BuildResultFromMatchInfo(lastMatchInfo, s) {
var j = REGEXP_FIRST_CAPTURE + 2;
for (var i = 1; i < numResults; i++) {
start = lastMatchInfo[j++];
- end = lastMatchInfo[j++];
- if (end != -1) {
+ if (start != -1) {
+ end = lastMatchInfo[j];
if (start + 1 == end) {
result[i] = %_StringCharAt(s, start);
} else {
result[i] = %_SubString(s, start, end);
}
- } else {
- // Make sure the element is present. Avoid reading the undefined
- // property from the global object since this may change.
- result[i] = void 0;
}
+ j++;
}
return result;
}
@@ -278,6 +275,10 @@ function TrimRegExp(regexp) {
function RegExpToString() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['RegExp.prototype.toString', this]);
+ }
var result = '/' + this.source + '/';
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
@@ -423,6 +424,7 @@ function SetUpRegExp() {
LAST_INPUT(lastMatchInfo) = ToString(string);
};
+ %OptimizeObjectForAddingMultipleProperties($RegExp, 22);
%DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
RegExpSetInput, DONT_DELETE);
%DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
@@ -477,6 +479,7 @@ function SetUpRegExp() {
RegExpMakeCaptureGetter(i), NoOpSetter,
DONT_DELETE);
}
+ %ToFastProperties($RegExp);
}
SetUpRegExp();
diff --git a/src/3rdparty/v8/src/rewriter.cc b/src/3rdparty/v8/src/rewriter.cc
index e58ddb4..6541546 100644
--- a/src/3rdparty/v8/src/rewriter.cc
+++ b/src/3rdparty/v8/src/rewriter.cc
@@ -38,12 +38,12 @@ namespace internal {
class Processor: public AstVisitor {
public:
- explicit Processor(Variable* result)
+ Processor(Variable* result, Zone* zone)
: result_(result),
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate()) { }
+ factory_(isolate(), zone) { }
virtual ~Processor() { }
@@ -230,8 +230,8 @@ EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
-// Assumes code has been parsed and scopes have been analyzed. Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
+// Assumes code has been parsed. Mutates the AST, so the AST should not
+// continue to be used in the case of failure.
bool Rewriter::Rewrite(CompilationInfo* info) {
FunctionLiteral* function = info->function();
ASSERT(function != NULL);
@@ -243,7 +243,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
info->isolate()->factory()->result_symbol());
- Processor processor(result);
+ Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -257,12 +257,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// coincides with the end of the with scope which is the position of '1'.
int position = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, position);
+ result->name(), false, Interface::NewValue(), position);
result_proxy->BindTo(result);
Statement* result_statement =
processor.factory()->NewReturnStatement(result_proxy);
result_statement->set_statement_pos(position);
- body->Add(result_statement);
+ body->Add(result_statement, info->zone());
}
}
diff --git a/src/3rdparty/v8/src/runtime-profiler.cc b/src/3rdparty/v8/src/runtime-profiler.cc
index 568e48e..23f41fa 100644
--- a/src/3rdparty/v8/src/runtime-profiler.cc
+++ b/src/3rdparty/v8/src/runtime-profiler.cc
@@ -34,6 +34,7 @@
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
+#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
@@ -65,16 +66,24 @@ static const int kSizeLimit = 1500;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If the function optimization was disabled due to high deoptimization count,
+// but the function is hot and has been seen on the stack this number of times,
+// then we try to reenable optimization for this function.
+static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
+STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt = 500;
+static const int kMaxSizeEarlyOpt =
+ 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
Atomic32 RuntimeProfiler::state_ = 0;
@@ -144,15 +153,20 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF("]\n");
}
- // The next call to the function will trigger optimization.
- function->MarkForLazyRecompilation();
+ if (FLAG_parallel_recompilation) {
+ function->MarkForParallelRecompilation();
+ } else {
+ // The next call to the function will trigger optimization.
+ function->MarkForLazyRecompilation();
+ }
}
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
- ASSERT(function->IsMarkedForLazyRecompilation());
+ ASSERT(function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
@@ -211,7 +225,10 @@ int RuntimeProfiler::LookupSample(JSFunction* function) {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* sample = sampler_window_[i];
if (sample != NULL) {
- if (function == sample) {
+ bool fits = FLAG_lookup_sample_by_shared
+ ? (function->shared() == JSFunction::cast(sample)->shared())
+ : (function == JSFunction::cast(sample));
+ if (fits) {
weight += sampler_window_weight_[i];
}
}
@@ -263,30 +280,45 @@ void RuntimeProfiler::OptimizeNow() {
}
}
- Code* shared_code = function->shared()->code();
+ SharedFunctionInfo* shared = function->shared();
+ Code* shared_code = shared->code();
+
if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsMarkedForLazyRecompilation()) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation()) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
- // Do not record non-optimizable functions.
- if (!function->IsOptimizable()) continue;
- if (function->shared()->optimization_disabled()) continue;
-
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
- if (function->shared()->is_toplevel()
- && (frame_count > 1
- || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
+ if (shared->is_toplevel() &&
+ (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
continue;
}
+ // Do not record non-optimizable functions.
+ if (shared->optimization_disabled()) {
+ if (shared->deopt_count() >= FLAG_max_opt_count) {
+ // If optimization was disabled due to many deoptimizations,
+ // then check if the function is hot and try to reenable optimization.
+ int ticks = shared_code->profiler_ticks();
+ if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+ shared_code->set_profiler_ticks(0);
+ shared->TryReenableOptimization();
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+ }
+ continue;
+ }
+ if (!function->IsOptimizable()) continue;
+
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
@@ -309,7 +341,7 @@ void RuntimeProfiler::OptimizeNow() {
}
}
} else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
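
The reenabling logic above counts profiler ticks in the shared code object (one byte, hence the STATIC_ASSERTs) and retries optimization once a disabled-but-hot function reaches 250 ticks. A self-contained sketch of that counter, with plain structs standing in for SharedFunctionInfo/Code and max_opt_count standing in for FLAG_max_opt_count:

  #include <cstdint>

  static const int kProfilerTicksBeforeReenablingOptimization = 250;  // fits in a byte

  struct SharedSketch {
    uint8_t profiler_ticks;   // one-byte tick counter, as in the Code object
    int deopt_count;
    bool optimization_disabled;
    void TryReenableOptimization() { optimization_disabled = false; }
  };

  // Per-tick decision for a function whose optimization was disabled.
  void ProfileTick(SharedSketch* shared, int max_opt_count) {
    if (!shared->optimization_disabled) return;
    if (shared->deopt_count < max_opt_count) return;  // disabled for another reason
    if (shared->profiler_ticks >= kProfilerTicksBeforeReenablingOptimization) {
      shared->profiler_ticks = 0;          // reset and give it another chance
      shared->TryReenableOptimization();
    } else {
      shared->profiler_ticks++;
    }
  }

  int main() {
    SharedSketch s = { 0, 10, true };
    for (int i = 0; i <= kProfilerTicksBeforeReenablingOptimization; i++) {
      ProfileTick(&s, 10);
    }
    return s.optimization_disabled ? 1 : 0;  // 0: reenabled after 250 hot ticks
  }
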
diff --git a/src/3rdparty/v8/src/runtime.cc b/src/3rdparty/v8/src/runtime.cc
index 919bf7a..08b0cd1 100644
--- a/src/3rdparty/v8/src/runtime.cc
+++ b/src/3rdparty/v8/src/runtime.cc
@@ -46,6 +46,7 @@
#include "isolate-inl.h"
#include "jsregexp.h"
#include "json-parser.h"
+#include "json-stringifier.h"
#include "liveedit.h"
#include "liveobjectlist-inl.h"
#include "misc-intrinsics.h"
@@ -54,7 +55,7 @@
#include "runtime-profiler.h"
#include "runtime.h"
#include "scopeinfo.h"
-#include "smart-array-pointer.h"
+#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -208,8 +209,10 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
if (elements->map() == heap->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
@@ -223,7 +226,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
Object* value = elements->get(i);
ASSERT(value->IsSmi() ||
value->IsTheHole() ||
- (copy->GetElementsKind() == FAST_ELEMENTS));
+ (IsFastObjectElementsKind(copy->GetElementsKind())));
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
{ MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -268,6 +271,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
// No contained objects, nothing to do.
break;
}
@@ -300,7 +304,7 @@ static Handle<Map> ComputeObjectLiteralMap(
}
}
// If we only have symbols and array indices among keys then we can
- // use the map cache in the global context.
+ // use the map cache in the native context.
const int kMaxKeys = 10;
if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) {
@@ -339,14 +343,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<FixedArray> constant_properties,
bool should_have_fast_elements,
bool has_function_literal) {
- // Get the global context from the literals array. This is the
+ // Get the native context from the literals array. This is the
// context in which the function was created and we use the object
// function from this context to create the object literal. We do
- // not use the object function from the current global context
+ // not use the object function from the current native context
// because this might be the object function from another context
// which we should not have access to.
Handle<Context> context =
- Handle<Context>(JSFunction::GlobalContextFromLiterals(*literals));
+ Handle<Context>(JSFunction::NativeContextFromLiterals(*literals));
// In case we have function literals, we want the object to be in
// slow properties mode for now. We don't go in the map cache because
@@ -452,7 +456,7 @@ MaybeObject* TransitionElements(Handle<Object> object,
}
-static const int kSmiOnlyLiteralMinimumLength = 1024;
+static const int kSmiLiteralMinimumLength = 1024;
Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
@@ -461,7 +465,7 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArray> elements) {
// Create the JSArray.
Handle<JSFunction> constructor(
- JSFunction::GlobalContextFromLiterals(*literals)->array_function());
+ JSFunction::NativeContextFromLiterals(*literals)->array_function());
Handle<JSArray> object =
Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
@@ -470,23 +474,22 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(elements->get(1)));
- Context* global_context = isolate->context()->global_context();
- if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- object->set_map(Map::cast(global_context->smi_js_array_map()));
- } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
- object->set_map(Map::cast(global_context->double_js_array_map()));
- } else {
- object->set_map(Map::cast(global_context->object_js_array_map()));
- }
+ ASSERT(IsFastElementsKind(constant_elements_kind));
+ Context* native_context = isolate->context()->native_context();
+ Object* maybe_maps_array = native_context->js_array_maps();
+ ASSERT(!maybe_maps_array->IsUndefined());
+ Object* maybe_map = FixedArray::cast(maybe_maps_array)->get(
+ constant_elements_kind);
+ ASSERT(maybe_map->IsMap());
+ object->set_map(Map::cast(maybe_map));
Handle<FixedArrayBase> copied_elements_values;
- if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastDoubleElementsKind(constant_elements_kind)) {
ASSERT(FLAG_smi_only_arrays);
copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
Handle<FixedDoubleArray>::cast(constant_elements_values));
} else {
- ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- constant_elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind));
const bool is_cow =
(constant_elements_values->map() ==
isolate->heap()->fixed_cow_array_map());
@@ -522,15 +525,22 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
object->set_elements(*copied_elements_values);
object->set_length(Smi::FromInt(copied_elements_values->length()));
- // Ensure that the boilerplate object has FAST_ELEMENTS, unless the flag is
+ // Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is
// on or the object is larger than the threshold.
if (!FLAG_smi_only_arrays &&
- constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) {
- if (object->GetElementsKind() != FAST_ELEMENTS) {
- CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
+ constant_elements_values->length() < kSmiLiteralMinimumLength) {
+ ElementsKind elements_kind = object->GetElementsKind();
+ if (!IsFastObjectElementsKind(elements_kind)) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS,
+ isolate)->IsFailure());
+ } else {
+ CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
+ }
}
}
+ object->ValidateElements();
return object;
}
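
The transition above preserves holeyness: a boilerplate with holey elements moves to FAST_HOLEY_ELEMENTS, a packed one to FAST_ELEMENTS. A toy version of the predicates involved (the real ElementsKind enum and IsFast*ElementsKind helpers live in V8's elements-kind headers; this is only an illustration):

  #include <cassert>

  // Toy copy of the packed/holey split introduced by this patch.
  enum ElementsKindSketch {
    FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
    FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
    FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS
  };

  static bool IsHoley(ElementsKindSketch k) {
    return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_ELEMENTS ||
           k == FAST_HOLEY_DOUBLE_ELEMENTS;
  }

  // Forcing a boilerplate to object elements keeps its holeyness.
  static ElementsKindSketch ObjectKindFor(ElementsKindSketch k) {
    return IsHoley(k) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
  }

  int main() {
    assert(ObjectKindFor(FAST_HOLEY_SMI_ELEMENTS) == FAST_HOLEY_ELEMENTS);
    assert(ObjectKindFor(FAST_SMI_ELEMENTS) == FAST_ELEMENTS);
    return 0;
  }
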
@@ -626,6 +636,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
boilerplate =
Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
@@ -663,7 +674,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
ASSERT(args.length() == 2);
- Object* handler = args[0];
+ CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* prototype = args[1];
Object* used_prototype =
prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
@@ -673,9 +684,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
ASSERT(args.length() == 4);
- Object* handler = args[0];
+ CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* call_trap = args[1];
- Object* construct_trap = args[2];
+ RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
+ CONVERT_ARG_CHECKED(JSFunction, construct_trap, 2);
Object* prototype = args[3];
Object* used_prototype =
prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
@@ -745,7 +757,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetAdd(table, key);
holder->set_table(*table);
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_value();
}
@@ -767,7 +779,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetRemove(table, key);
holder->set_table(*table);
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
+ Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
}
@@ -785,8 +806,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<Object> key(args[1]);
- return ObjectHashTable::cast(holder->table())->Lookup(*key);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ Handle<ObjectHashTable> new_table =
+ PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ holder->set_table(*new_table);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -794,12 +842,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<Object> key(args[1]);
- Handle<Object> value(args[2]);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
holder->set_table(*new_table);
- return *value;
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+ return Smi::FromInt(table->NumberOfElements());
}
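
These collection runtime functions all lean on one convention: ObjectHashTable::Lookup returns the hole value for absent keys, so "has" is a hole test and "delete" writes the hole back while reporting whether the key was present. A standalone sketch of that sentinel convention, with std::unordered_map standing in for ObjectHashTable and nullptr for the hole:

  #include <cassert>
  #include <string>
  #include <unordered_map>

  // Sentinel-based table in the style of the runtime functions above:
  // an absent key and a deleted key both read back as the sentinel.
  struct HashTableSketch {
    std::unordered_map<std::string, const char*> table;

    const char* Lookup(const std::string& key) const {
      auto it = table.find(key);
      return it == table.end() ? nullptr : it->second;  // nullptr ~ the hole
    }
    void Put(const std::string& key, const char* value) { table[key] = value; }

    bool Has(const std::string& key) const { return Lookup(key) != nullptr; }
    bool Delete(const std::string& key) {
      bool was_present = Has(key);
      table[key] = nullptr;   // "store the hole", as Runtime_MapDelete does
      return was_present;     // MapDelete reports whether the key existed
    }
  };

  int main() {
    HashTableSketch t;
    t.Put("a", "1");
    assert(t.Has("a"));
    assert(t.Delete("a"));    // true: key was present
    assert(!t.Has("a"));      // deleted entries read back as the sentinel
    assert(!t.Delete("a"));   // second delete reports absence
    return 0;
  }
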
@@ -816,11 +873,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
- NoHandleAllocation ha;
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
+ Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<Object> lookup(table->Lookup(*key));
+ Handle<ObjectHashTable> new_table =
+ PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ weakmap->set_table(*new_table);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -833,7 +917,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
weakmap->set_table(*new_table);
- return *value;
+ return isolate->heap()->undefined_value();
}
@@ -889,13 +973,13 @@ static void GetOwnPropertyImplementation(JSObject* obj,
LookupResult* result) {
obj->LocalLookupRealNamedProperty(name, result);
- if (!result->IsProperty()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
- GetOwnPropertyImplementation(JSObject::cast(proto),
- name, result);
- }
+ if (result->IsFound()) return;
+
+ Object* proto = obj->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype())
+ GetOwnPropertyImplementation(JSObject::cast(proto),
+ name, result);
}
@@ -1007,7 +1091,7 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
// This could be an element.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- switch (obj->HasLocalElement(index)) {
+ switch (obj->GetLocalElementType(index)) {
case JSObject::UNDEFINED_ELEMENT:
return heap->undefined_value();
@@ -1106,7 +1190,7 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
- bool is_js_accessor = (result.type() == CALLBACKS) &&
+ bool is_js_accessor = result.IsPropertyCallbacks() &&
(result.GetCallbackObject()->IsAccessorPair());
if (is_js_accessor) {
@@ -1114,11 +1198,13 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(IS_ACCESSOR_INDEX, heap->true_value());
AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
- if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
+ Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
+ if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, getter);
}
- if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
+ Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
+ if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, setter);
}
} else {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1179,7 +1265,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
+ Handle<Object> result =
+ RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
if (result.is_null()) return Failure::Exception();
return *result;
}
@@ -1226,13 +1313,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
bool needs_access_checks = old_map->is_access_check_needed();
if (needs_access_checks) {
// Copy map so it won't interfere with the constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ Map* new_map;
+ MaybeObject* maybe_new_map = old_map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- Map::cast(new_map)->set_is_access_check_needed(false);
- object->set_map(Map::cast(new_map));
+ new_map->set_is_access_check_needed(false);
+ object->set_map(new_map);
}
return isolate->heap()->ToBoolean(needs_access_checks);
}
@@ -1244,13 +1330,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
Map* old_map = object->map();
if (!old_map->is_access_check_needed()) {
// Copy map so it won't interfere with the constructor's initial map.
- Object* new_map;
- { MaybeObject* maybe_new_map = old_map->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- }
+ Map* new_map;
+ MaybeObject* maybe_new_map = old_map->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- Map::cast(new_map)->set_is_access_check_needed(true);
- object->set_map(Map::cast(new_map));
+ new_map->set_is_access_check_needed(true);
+ object->set_map(new_map);
}
return isolate->heap()->undefined_value();
}
@@ -1277,8 +1362,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
CONVERT_SMI_ARG_CHECKED(flags, 2);
- Handle<JSObject> js_global = Handle<JSObject>(isolate->context()->global());
- Handle<JSObject> qml_global = Handle<JSObject>(isolate->context()->qml_global());
+ Handle<JSObject> js_global = Handle<JSObject>(isolate->context()->global_object());
+ Handle<JSObject> qml_global = Handle<JSObject>(isolate->context()->qml_global_object());
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
@@ -1303,16 +1388,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
// value of the variable if the property is already there.
- // Do the lookup locally only, see ES5 errata.
+ // Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
- if (FLAG_es52_globals)
- global->LocalLookup(*name, &lookup, true);
- else
+ if (FLAG_es52_globals) {
+ Object* obj = *global;
+ do {
+ JSObject::cast(obj)->LocalLookup(*name, &lookup, true);
+ if (lookup.IsFound()) break;
+ obj = obj->GetPrototype();
+ } while (obj->IsJSObject() &&
+ JSObject::cast(obj)->map()->is_hidden_prototype());
+ } else {
global->Lookup(*name, &lookup, true);
- if (lookup.IsProperty()) {
+ }
+ if (lookup.IsFound()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
- if (lookup.type() != INTERCEPTOR) continue;
+ if (!lookup.IsInterceptor()) continue;
PropertyAttributes attributes = global->GetPropertyAttribute(*name);
if (attributes != ABSENT) continue;
// Fall-through and introduce the absent property by using
@@ -1345,12 +1437,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- if (!lookup.IsProperty() || is_function || is_module) {
+ if (!lookup.IsFound() || is_function || is_module) {
// If the local property exists, check that we can reconfigure it
// as required for function declarations.
- if (lookup.IsProperty() && lookup.IsDontDelete()) {
+ if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
- lookup.type() == CALLBACKS) {
+ lookup.IsPropertyCallbacks()) {
return ThrowRedeclarationError(
isolate, is_function ? "function" : "module", name);
}
@@ -1380,7 +1472,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- // Declarations are always made in a function or global context. In the
+ // Declarations are always made in a function or native context. In the
// case of eval code, the context passed is the context of the caller,
// which may be some nested context and not the declaration context.
RUNTIME_ASSERT(args[0]->IsContext());
@@ -1419,7 +1511,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
}
} else {
// Slow case: The property is in the context extension object of a
- // function context or the global object of a global context.
+ // function context or the global object of a native context.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
@@ -1460,7 +1552,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
!object->IsJSContextExtensionObject()) {
LookupResult lookup(isolate);
object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.type() == CALLBACKS)) {
+ if (lookup.IsPropertyCallbacks()) {
return ThrowRedeclarationError(isolate, "const", name);
}
}
@@ -1499,7 +1591,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
RUNTIME_ASSERT(args[2]->IsSmi());
int qml_mode = Smi::cast(args[2])->value();
- JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ JSObject* global = qml_mode ? isolate->context()->qml_global_object()
+ : isolate->context()->global_object();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
@@ -1518,7 +1611,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
JSObject::cast(object)->map()->is_hidden_prototype()) {
JSObject* raw_holder = JSObject::cast(object);
raw_holder->LocalLookup(*name, &lookup, true);
- if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsInterceptor()) {
HandleScope handle_scope(isolate);
Handle<JSObject> holder(raw_holder);
PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
@@ -1538,10 +1631,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
}
// Reload global in case the loop above performed a GC.
- global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ global = qml_mode ? isolate->context()->qml_global_object()
+ : isolate->context()->global_object();
if (assign) {
return global->SetProperty(
- *name, args[3], attributes, strict_mode_flag, true);
+ *name, args[3], attributes, strict_mode_flag, JSReceiver::MAY_BE_STORE_FROM_KEYED, true);
}
return isolate->heap()->undefined_value();
}
@@ -1559,7 +1653,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
int qml_mode = Smi::cast(args[2])->value();
// Get the current global object from top.
- JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+ JSObject* global = qml_mode ? isolate->context()->qml_global_object()
+ : isolate->context()->global_object();
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable. Since it's a const, it must be READ_ONLY too.
@@ -1573,7 +1668,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// We use SetLocalPropertyIgnoreAttributes instead
LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
- if (!lookup.IsProperty()) {
+ if (!lookup.IsFound()) {
return global->SetLocalPropertyIgnoreAttributes(*name,
*value,
attributes);
@@ -1583,7 +1678,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Restore global object from context (in case of GC) and continue
// with setting the value.
HandleScope handle_scope(isolate);
- Handle<JSObject> global(qml_mode?isolate->context()->qml_global():isolate->context()->global());
+ Handle<JSObject> global(qml_mode ? isolate->context()->qml_global_object()
+ : isolate->context()->global_object());
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
@@ -1600,14 +1696,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// constant. For now, we determine this by checking if the
// current value is the hole.
// Strict mode handling not needed (const is disallowed in strict mode).
- PropertyType type = lookup.type();
- if (type == FIELD) {
+ if (lookup.IsField()) {
FixedArray* properties = global->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
- } else if (type == NORMAL) {
+ } else if (lookup.IsNormal()) {
if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
!lookup.IsReadOnly()) {
global->SetNormalizedProperty(&lookup, *value);
@@ -1615,7 +1710,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
} else {
// Ignore re-initialization of constants that have already been
// assigned a function value.
- ASSERT(lookup.IsReadOnly() && type == CONSTANT_FUNCTION);
+ ASSERT(lookup.IsReadOnly() && lookup.IsConstantFunction());
}
// Use the set value as the result of the operation.
@@ -1630,7 +1725,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
- // Initializations are always done in a function or global context.
+ // Initializations are always done in a function or native context.
RUNTIME_ASSERT(args[1]->IsContext());
Handle<Context> context(Context::cast(args[1])->declaration_context());
@@ -1658,7 +1753,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// global object.
if (attributes == ABSENT) {
Handle<JSObject> global = Handle<JSObject>(
- isolate->context()->global());
+ isolate->context()->global_object());
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
@@ -1691,14 +1786,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
ASSERT(lookup.IsFound()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
- PropertyType type = lookup.type();
- if (type == FIELD) {
+ if (lookup.IsField()) {
FixedArray* properties = object->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
- } else if (type == NORMAL) {
+ } else if (lookup.IsNormal()) {
if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
object->SetNormalizedProperty(&lookup, *value);
}
@@ -1745,7 +1839,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(last_match_info->HasFastElements());
+ RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
@@ -1780,7 +1874,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
AssertNoAllocation no_gc;
HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
- set_map(isolate->global_context()->regexp_result_map());
+ set_map(isolate->native_context()->regexp_result_map());
}
JSArray* array = JSArray::cast(new_object);
array->set_properties(isolate->heap()->empty_fixed_array());
@@ -1932,9 +2026,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
// Returns undefined for strict or native functions, or
// the associated global receiver for "normal" functions.
- Context* global_context =
- function->context()->global()->global_context();
- return global_context->global()->global_receiver();
+ Context* native_context =
+ function->context()->global_object()->native_context();
+ return native_context->global_object()->global_receiver();
}
@@ -1949,11 +2043,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
// Get the RegExp function from the context in the literals array.
// This is the RegExp function from the context in which the
// function was created. We do not use the RegExp function from the
- // current global context because this might be the RegExp function
+ // current native context because this might be the RegExp function
// from another context which we should not have access to.
Handle<JSFunction> constructor =
Handle<JSFunction>(
- JSFunction::GlobalContextFromLiterals(*literals)->regexp_function());
+ JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
// Compute the regular expression literal.
bool has_pending_exception;
Handle<Object> regexp =
@@ -2011,8 +2105,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- Object* obj = f->RemovePrototype();
- if (obj->IsFailure()) return obj;
+ f->RemovePrototype();
return isolate->heap()->undefined_value();
}
@@ -2105,40 +2198,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- MaybeObject* maybe_name =
- isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
- String* name;
- if (!maybe_name->To(&name)) return maybe_name;
+ String* name = isolate->heap()->prototype_symbol();
if (function->HasFastProperties()) {
// Construct a new field descriptor with updated attributes.
DescriptorArray* instance_desc = function->map()->instance_descriptors();
- int index = instance_desc->Search(name);
+
+ int index = instance_desc->SearchWithCache(name, function->map());
ASSERT(index != DescriptorArray::kNotFound);
PropertyDetails details = instance_desc->GetDetails(index);
+
CallbacksDescriptor new_desc(name,
instance_desc->GetValue(index),
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.index());
- // Construct a new field descriptors array containing the new descriptor.
- Object* descriptors_unchecked;
- { MaybeObject* maybe_descriptors_unchecked =
- instance_desc->CopyInsert(&new_desc, REMOVE_TRANSITIONS);
- if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
- return maybe_descriptors_unchecked;
- }
- }
- DescriptorArray* new_descriptors =
- DescriptorArray::cast(descriptors_unchecked);
+ details.descriptor_index());
+
// Create a new map featuring the new field descriptors array.
- Object* map_unchecked;
- { MaybeObject* maybe_map_unchecked = function->map()->CopyDropDescriptors();
- if (!maybe_map_unchecked->ToObject(&map_unchecked)) {
- return maybe_map_unchecked;
- }
- }
- Map* new_map = Map::cast(map_unchecked);
- new_map->set_instance_descriptors(new_descriptors);
+ Map* new_map;
+ MaybeObject* maybe_map =
+ function->map()->CopyReplaceDescriptor(
+ instance_desc, &new_desc, index, OMIT_TRANSITION);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
function->set_map(new_map);
} else { // Dictionary properties.
// Directly manipulate the property details.
@@ -2148,7 +2229,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
details.type(),
- details.index());
+ details.dictionary_index());
function->property_dictionary()->DetailsAtPut(entry, new_details);
}
return function;
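// The allocation calls above follow V8's MaybeObject convention: a call
// yields either the object or a failure sentinel, and To(&out) unpacks and
// tests in one step so failures bubble up to the caller. A minimal stand-in,
// not the real V8 classes:

struct Object {};

struct MaybeObject {
  Object* value;  // NULL encodes an allocation failure / retry request
  template <typename T>
  bool To(T** out) {
    if (value == NULL) return false;  // caller returns the MaybeObject* as-is
    *out = static_cast<T*>(value);
    return true;
  }
};

// Call sites then read:
//   Map* new_map;
//   MaybeObject* maybe_map = function->map()->CopyReplaceDescriptor(...);
//   if (!maybe_map->To(&new_map)) return maybe_map;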
@@ -2180,60 +2261,58 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
Handle<Object> code = args.at<Object>(1);
- Handle<Context> context(target->context());
-
- if (!code->IsNull()) {
- RUNTIME_ASSERT(code->IsJSFunction());
- Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
- Handle<SharedFunctionInfo> shared(fun->shared());
-
- if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
- return Failure::Exception();
- }
- // Since we don't store the source for this we should never
- // optimize this.
- shared->code()->set_optimizable(false);
- // Set the code, scope info, formal parameter count,
- // and the length of the target function.
- target->shared()->set_code(shared->code());
- target->ReplaceCode(shared->code());
- target->shared()->set_scope_info(shared->scope_info());
- target->shared()->set_length(shared->length());
- target->shared()->set_formal_parameter_count(
- shared->formal_parameter_count());
- // Set the source code of the target function to undefined.
- // SetCode is only used for built-in constructors like String,
- // Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
- target->shared()->set_script(isolate->heap()->undefined_value());
- target->shared()->code()->set_optimizable(false);
- // Clear the optimization hints related to the compiled code as these are no
- // longer valid when the code is overwritten.
- target->shared()->ClearThisPropertyAssignmentsInfo();
- context = Handle<Context>(fun->context());
-
- // Make sure we get a fresh copy of the literal vector to avoid
- // cross context contamination.
- int number_of_literals = fun->NumberOfLiterals();
- Handle<FixedArray> literals =
- isolate->factory()->NewFixedArray(number_of_literals, TENURED);
- if (number_of_literals > 0) {
- // Insert the object, regexp and array functions in the literals
- // array prefix. These are the functions that will be used when
- // creating object, regexp and array literals.
- literals->set(JSFunction::kLiteralGlobalContextIndex,
- context->global_context());
- }
- target->set_literals(*literals);
- target->set_next_function_link(isolate->heap()->undefined_value());
-
- if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
- isolate->logger()->LogExistingFunction(
- shared, Handle<Code>(shared->code()));
- }
+ if (code->IsNull()) return *target;
+ RUNTIME_ASSERT(code->IsJSFunction());
+ Handle<JSFunction> source = Handle<JSFunction>::cast(code);
+ Handle<SharedFunctionInfo> target_shared(target->shared());
+ Handle<SharedFunctionInfo> source_shared(source->shared());
+
+ if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
+ return Failure::Exception();
}
+ // Set the code, scope info, formal parameter count, and the length
+ // of the target shared function info. Set the source code of the
+ // target function to undefined. SetCode is only used for built-in
+ // constructors like String, Array, and Object, and some web code
+ // doesn't like seeing source code for constructors.
+ target_shared->set_code(source_shared->code());
+ target_shared->set_scope_info(source_shared->scope_info());
+ target_shared->set_length(source_shared->length());
+ target_shared->set_formal_parameter_count(
+ source_shared->formal_parameter_count());
+ target_shared->set_script(isolate->heap()->undefined_value());
+
+ // Since we don't store the source, we should never optimize this.
+ target_shared->code()->set_optimizable(false);
+
+ // Clear the optimization hints related to the compiled code as these
+ // are no longer valid when the code is overwritten.
+ target_shared->ClearThisPropertyAssignmentsInfo();
+
+ // Set the code of the target function.
+ target->ReplaceCode(source_shared->code());
+ ASSERT(target->next_function_link()->IsUndefined());
+
+ // Make sure we get a fresh copy of the literal vector to avoid cross
+ // context contamination.
+ Handle<Context> context(source->context());
+ int number_of_literals = source->NumberOfLiterals();
+ Handle<FixedArray> literals =
+ isolate->factory()->NewFixedArray(number_of_literals, TENURED);
+ if (number_of_literals > 0) {
+ literals->set(JSFunction::kLiteralNativeContextIndex,
+ context->native_context());
+ }
target->set_context(*context);
+ target->set_literals(*literals);
+
+ if (isolate->logger()->is_logging_code_events() ||
+ CpuProfiler::is_profiling(isolate)) {
+ isolate->logger()->LogExistingFunction(
+ source_shared, Handle<Code>(source_shared->code()));
+ }
+
return *target;
}
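// The rewrite above returns early for null code and copies shared-function
// state field by field. The fresh literals array is the subtle part: its
// first slot pins a native context, which literal-creation code consults for
// the Object/Array/RegExp constructors, so sharing one literals array across
// contexts would leak objects between them. Condensed, with names from the
// diff:

Handle<FixedArray> literals =
    isolate->factory()->NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
  // kLiteralNativeContextIndex selects which context's constructors are used
  // when object, array and regexp literals are materialized.
  literals->set(JSFunction::kLiteralNativeContextIndex,
                context->native_context());
}
target->set_literals(*literals);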
@@ -2266,19 +2345,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
- Object* index = args[1];
- RUNTIME_ASSERT(index->IsNumber());
-
- uint32_t i = 0;
- if (index->IsSmi()) {
- int value = Smi::cast(index)->value();
- if (value < 0) return isolate->heap()->nan_value();
- i = value;
- } else {
- ASSERT(index->IsHeapNumber());
- double value = HeapNumber::cast(index)->value();
- i = static_cast<uint32_t>(DoubleToInteger(value));
- }
+ CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
@@ -2372,18 +2439,13 @@ class FixedArrayBuilder {
return array_->length();
}
- Handle<JSArray> ToJSArray() {
- Handle<JSArray> result_array = FACTORY->NewJSArrayWithElements(array_);
- result_array->set_length(Smi::FromInt(length_));
- return result_array;
- }
-
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
FACTORY->SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
+
private:
Handle<FixedArray> array_;
int length_;
@@ -2502,10 +2564,6 @@ class ReplacementStringBuilder {
character_count_ += by;
}
- Handle<JSArray> GetParts() {
- return array_builder_.ToJSArray();
- }
-
private:
Handle<SeqAsciiString> NewRawAsciiString(int length) {
return heap_->isolate()->factory()->NewRawAsciiString(length);
@@ -2533,26 +2591,26 @@ class ReplacementStringBuilder {
class CompiledReplacement {
public:
- CompiledReplacement()
- : parts_(1), replacement_substrings_(0), simple_hint_(false) {}
+ explicit CompiledReplacement(Zone* zone)
+ : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
- void Compile(Handle<String> replacement,
+ // Return whether the replacement is simple.
+ bool Compile(Handle<String> replacement,
int capture_count,
int subject_length);
+ // Use Apply only if Compile returned false.
void Apply(ReplacementStringBuilder* builder,
int match_from,
int match_to,
- Handle<JSArray> last_match_info);
+ int32_t* match);
// Number of distinct parts of the replacement pattern.
int parts() {
return parts_.length();
}
- bool simple_hint() {
- return simple_hint_;
- }
+ Zone* zone() const { return zone_; }
private:
enum PartType {
@@ -2612,10 +2670,11 @@ class CompiledReplacement {
};
template<typename Char>
- static bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
- Vector<Char> characters,
- int capture_count,
- int subject_length) {
+ bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+ Vector<Char> characters,
+ int capture_count,
+ int subject_length,
+ Zone* zone) {
int length = characters.length();
int last = 0;
for (int i = 0; i < length; i++) {
@@ -2630,7 +2689,8 @@ class CompiledReplacement {
case '$':
if (i > last) {
// There is a substring before. Include the first "$".
- parts->Add(ReplacementPart::ReplacementSubString(last, next_index));
+ parts->Add(ReplacementPart::ReplacementSubString(last, next_index),
+ zone);
last = next_index + 1; // Continue after the second "$".
} else {
// Let the next substring start with the second "$".
@@ -2640,25 +2700,25 @@ class CompiledReplacement {
break;
case '`':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectPrefix());
+ parts->Add(ReplacementPart::SubjectPrefix(), zone);
i = next_index;
last = i + 1;
break;
case '\'':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectSuffix(subject_length));
+ parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
i = next_index;
last = i + 1;
break;
case '&':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
- parts->Add(ReplacementPart::SubjectMatch());
+ parts->Add(ReplacementPart::SubjectMatch(), zone);
i = next_index;
last = i + 1;
break;
@@ -2691,10 +2751,10 @@ class CompiledReplacement {
}
if (capture_ref > 0) {
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i));
+ parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
}
ASSERT(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref));
+ parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
last = next_index + 1;
}
i = next_index;
@@ -2708,10 +2768,10 @@ class CompiledReplacement {
}
if (length > last) {
if (last == 0) {
- parts->Add(ReplacementPart::ReplacementString());
+ // Replacement is simple. Do not use Apply to do the replacement.
return true;
} else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length));
+ parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
}
}
return false;
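// A worked example of the part list this parser builds, using the tags
// defined above. For the replacement "x$1y$&" against a one-capture pattern:
//   ReplacementSubString(0, 1)  -> the literal "x"
//   SubjectCapture(1)           -> whatever capture 1 matched
//   ReplacementSubString(3, 4)  -> the literal "y"
//   SubjectMatch()              -> the whole match
// and the function returns false. A replacement with no '$' at all, such as
// "abc", adds no parts and returns true, so callers skip Apply and append
// the replacement string verbatim.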
@@ -2719,30 +2779,35 @@ class CompiledReplacement {
ZoneList<ReplacementPart> parts_;
ZoneList<Handle<String> > replacement_substrings_;
- bool simple_hint_;
+ Zone* zone_;
};
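// The class contract changes with this diff: Compile() now reports whether
// the replacement is simple, and Apply() consumes the raw int32_t match
// registers instead of a last-match-info JSArray. The intended call
// sequence, as used by StringReplaceRegExpWithString below:

CompiledReplacement compiled_replacement(zone);
bool simple_replace =
    compiled_replacement.Compile(replacement, capture_count, subject_length);
// ... per match ...
if (simple_replace) {
  builder.AddString(replacement);  // no '$' escapes: append it verbatim
} else {
  compiled_replacement.Apply(&builder, start, end, current_match);
}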
-void CompiledReplacement::Compile(Handle<String> replacement,
+bool CompiledReplacement::Compile(Handle<String> replacement,
int capture_count,
int subject_length) {
{
AssertNoAllocation no_alloc;
String::FlatContent content = replacement->GetFlatContent();
ASSERT(content.IsFlat());
+ bool simple = false;
if (content.IsAscii()) {
- simple_hint_ = ParseReplacementPattern(&parts_,
- content.ToAsciiVector(),
- capture_count,
- subject_length);
+ simple = ParseReplacementPattern(&parts_,
+ content.ToAsciiVector(),
+ capture_count,
+ subject_length,
+ zone());
} else {
ASSERT(content.IsTwoByte());
- simple_hint_ = ParseReplacementPattern(&parts_,
- content.ToUC16Vector(),
- capture_count,
- subject_length);
+ simple = ParseReplacementPattern(&parts_,
+ content.ToUC16Vector(),
+ capture_count,
+ subject_length,
+ zone());
}
+ if (simple) return true;
}
+
Isolate* isolate = replacement->GetIsolate();
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
@@ -2752,23 +2817,25 @@ void CompiledReplacement::Compile(Handle<String> replacement,
int from = -tag;
int to = parts_[i].data;
replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to));
+ isolate->factory()->NewSubString(replacement, from, to), zone());
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
} else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement);
+ replacement_substrings_.Add(replacement, zone());
parts_[i].data = substring_index;
substring_index++;
}
}
+ return false;
}
void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
int match_from,
int match_to,
- Handle<JSArray> last_match_info) {
+ int32_t* match) {
+ ASSERT_LT(0, parts_.length());
for (int i = 0, n = parts_.length(); i < n; i++) {
ReplacementPart part = parts_[i];
switch (part.tag) {
@@ -2784,9 +2851,8 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
case SUBJECT_CAPTURE: {
int capture = part.data;
- FixedArray* match_info = FixedArray::cast(last_match_info->elements());
- int from = RegExpImpl::GetCapture(match_info, capture * 2);
- int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
+ int from = match[capture * 2];
+ int to = match[capture * 2 + 1];
if (from >= 0 && to > from) {
builder->AddSubjectSlice(from, to);
}
@@ -2806,7 +2872,8 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
void FindAsciiStringIndices(Vector<const char> subject,
char pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
ASSERT(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
@@ -2817,7 +2884,7 @@ void FindAsciiStringIndices(Vector<const char> subject,
pos = reinterpret_cast<const char*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
- indices->Add(static_cast<int>(pos - subject_start));
+ indices->Add(static_cast<int>(pos - subject_start), zone);
pos++;
limit--;
}
@@ -2829,7 +2896,8 @@ void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
Vector<const PatternChar> pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
ASSERT(limit > 0);
// Collect indices of pattern in subject.
// Stop after finding at most limit values.
@@ -2839,7 +2907,7 @@ void FindStringIndices(Isolate* isolate,
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
- indices->Add(index);
+ indices->Add(index, zone);
index += pattern_length;
limit--;
}
@@ -2850,7 +2918,8 @@ void FindStringIndicesDispatch(Isolate* isolate,
String* subject,
String* pattern,
ZoneList<int>* indices,
- unsigned int limit) {
+ unsigned int limit,
+ Zone* zone) {
{
AssertNoAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
@@ -2865,20 +2934,23 @@ void FindStringIndicesDispatch(Isolate* isolate,
FindAsciiStringIndices(subject_vector,
pattern_vector[0],
indices,
- limit);
+ limit,
+ zone);
} else {
FindStringIndices(isolate,
subject_vector,
pattern_vector,
indices,
- limit);
+ limit,
+ zone);
}
} else {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToUC16Vector(),
indices,
- limit);
+ limit,
+ zone);
}
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
@@ -2887,85 +2959,21 @@ void FindStringIndicesDispatch(Isolate* isolate,
subject_vector,
pattern_content.ToAsciiVector(),
indices,
- limit);
+ limit,
+ zone);
} else {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToUC16Vector(),
indices,
- limit);
- }
- }
- }
-}
-
-
-// Two smis before and after the match, for very long strings.
-const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
-
-static void SetLastMatchInfoNoCaptures(Handle<String> subject,
- Handle<JSArray> last_match_info,
- int match_start,
- int match_end) {
- // Fill last_match_info with a single capture.
- last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_info->elements());
- RegExpImpl::SetLastCaptureCount(elements, 2);
- RegExpImpl::SetLastInput(elements, *subject);
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetCapture(elements, 0, match_start);
- RegExpImpl::SetCapture(elements, 1, match_end);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- String* pattern_string,
- FixedArrayBuilder* builder,
- int* match_pos) {
- int pos = *match_pos;
- int subject_length = subject.length();
- int pattern_length = pattern.length();
- int max_search_start = subject_length - pattern_length;
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
- while (pos <= max_search_start) {
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- // Position of end of previous match.
- int match_end = pos + pattern_length;
- int new_pos = search.Search(subject, match_end);
- if (new_pos >= 0) {
- // A match.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- new_pos);
+ limit,
+ zone);
}
- pos = new_pos;
- builder->Add(pattern_string);
- } else {
- break;
}
}
-
- if (pos < max_search_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- pos + pattern_length,
- subject_length);
- }
- *match_pos = pos;
- return true;
}
-
-
template<typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Isolate* isolate,
@@ -2976,8 +2984,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> indices(8);
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_space(zone, DELETE_ON_EXIT);
+ ZoneList<int> indices(8, zone);
ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -2985,12 +2994,21 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff);
+ FindStringIndicesDispatch(
+ isolate, *subject, pattern, &indices, 0xffffffff, zone);
int matches = indices.length();
if (matches == 0) return *subject;
- int result_len = (replacement_len - pattern_len) * matches + subject_len;
+ // Detect integer overflow.
+ int64_t result_len_64 =
+ (static_cast<int64_t>(replacement_len) -
+ static_cast<int64_t>(pattern_len)) *
+ static_cast<int64_t>(matches) +
+ static_cast<int64_t>(subject_len);
+ if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException();
+ int result_len = static_cast<int>(result_len_64);
+
int subject_pos = 0;
int result_pos = 0;
@@ -3032,10 +3050,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
subject_len);
}
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- indices.at(matches - 1),
- indices.at(matches - 1) + pattern_len);
+ int32_t match_indices[] = { indices.at(matches - 1),
+ indices.at(matches - 1) + pattern_len };
+ RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
return *result;
}
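// The overflow guard above is worth spelling out: the result length is
// computed in 64-bit arithmetic before narrowing, since a large match count
// can push (replacement_len - pattern_len) * matches + subject_len past
// INT_MAX. A standalone sketch of the same check:

#include <climits>
#include <stdint.h>

// Returns false if the replaced string's length would overflow an int.
static bool ComputeResultLength(int subject_len, int pattern_len,
                                int replacement_len, int matches,
                                int* result_len) {
  int64_t result_len_64 =
      (static_cast<int64_t>(replacement_len) -
       static_cast<int64_t>(pattern_len)) * static_cast<int64_t>(matches) +
      static_cast<int64_t>(subject_len);
  if (result_len_64 > INT_MAX) return false;
  *result_len = static_cast<int>(result_len_64);
  return true;
}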
@@ -3043,133 +3060,101 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- String* replacement,
- JSArray* last_match_info) {
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<String> replacement,
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- HandleScope handles(isolate);
-
- int length = subject->length();
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<String> replacement_handle(replacement);
- Handle<JSArray> last_match_info_handle(last_match_info);
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle);
- if (match.is_null()) {
- return Failure::Exception();
- }
- if (match->IsNull()) {
- return *subject_handle;
- }
-
- int capture_count = regexp_handle->CaptureCount();
+ bool is_global = regexp->GetFlags().is_global();
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
// CompiledReplacement uses zone allocation.
- ZoneScope zone(isolate, DELETE_ON_EXIT);
- CompiledReplacement compiled_replacement;
- compiled_replacement.Compile(replacement_handle,
- capture_count,
- length);
-
- bool is_global = regexp_handle->GetFlags().is_global();
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zonescope(zone, DELETE_ON_EXIT);
+ CompiledReplacement compiled_replacement(zone);
+ bool simple_replace = compiled_replacement.Compile(replacement,
+ capture_count,
+ subject_length);
// Shortcut for simple non-regexp global replacements.
if (is_global &&
- regexp_handle->TypeTag() == JSRegExp::ATOM &&
- compiled_replacement.simple_hint()) {
- if (subject_handle->HasOnlyAsciiChars() &&
- replacement_handle->HasOnlyAsciiChars()) {
+ regexp->TypeTag() == JSRegExp::ATOM &&
+ simple_replace) {
+ if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
return StringReplaceAtomRegExpWithString<SeqAsciiString>(
- isolate,
- subject_handle,
- regexp_handle,
- replacement_handle,
- last_match_info_handle);
+ isolate, subject, regexp, replacement, last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
- isolate,
- subject_handle,
- regexp_handle,
- replacement_handle,
- last_match_info_handle);
+ isolate, subject, regexp, replacement, last_match_info);
}
}
+ RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
+
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return Failure::Exception();
+ return *subject;
+ }
+
// Guessing the number of parts that the final result string is built
// from. Global regexps can match any number of times, so we guess
// conservatively.
int expected_parts =
(compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
ReplacementStringBuilder builder(isolate->heap(),
- subject_handle,
+ subject,
expected_parts);
- // Index of end of last match.
- int prev = 0;
-
// Number of parts added by compiled replacement plus preceding
// string and possibly suffix after last match. It is possible for
// all components to use two elements when encoded as two smis.
const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
- bool matched = true;
+
+ int prev = 0;
+
do {
- ASSERT(last_match_info_handle->HasFastElements());
- // Increase the capacity of the builder before entering local handle-scope,
- // so its internal buffer can safely allocate a new handle if it grows.
builder.EnsureCapacity(parts_added_per_loop);
- HandleScope loop_scope(isolate);
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
-
- ASSERT_EQ(capture_count * 2 + 2,
- RegExpImpl::GetLastCaptureCount(match_info_array));
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
+ int start = current_match[0];
+ int end = current_match[1];
if (prev < start) {
builder.AddSubjectSlice(prev, start);
}
- compiled_replacement.Apply(&builder,
- start,
- end,
- last_match_info_handle);
+
+ if (simple_replace) {
+ builder.AddString(replacement);
+ } else {
+ compiled_replacement.Apply(&builder,
+ start,
+ end,
+ current_match);
+ }
prev = end;
// Only continue checking for global regexps.
if (!is_global) break;
- // Continue from where the match ended, unless it was an empty match.
- int next = end;
- if (start == end) {
- next = end + 1;
- if (next > length) break;
- }
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle);
- if (match.is_null()) {
- return Failure::Exception();
- }
- matched = !match->IsNull();
- } while (matched);
+ if (global_cache.HasException()) return Failure::Exception();
- if (prev < length) {
- builder.AddSubjectSlice(prev, length);
+ if (prev < subject_length) {
+ builder.EnsureCapacity(2);
+ builder.AddSubjectSlice(prev, subject_length);
}
+ RegExpImpl::SetLastMatchInfo(last_match_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
return *(builder.ToString());
}
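// The per-iteration RegExpImpl::Exec calls are gone; RegExpImpl::GlobalCache
// now batches match results and hands out raw register pointers. The
// iteration protocol, exactly as the rewritten function above uses it:

RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
if (global_cache.HasException()) return Failure::Exception();

int32_t* current_match;
while ((current_match = global_cache.FetchNext()) != NULL) {
  int start = current_match[0];  // whole-match bounds
  int end = current_match[1];
  // current_match[2 * i] and current_match[2 * i + 1] bound capture i.
}
// FetchNext() returns NULL both on exhaustion and on error, so the
// exception state must be re-checked after the loop.
if (global_cache.HasException()) return Failure::Exception();
RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
                             global_cache.LastSuccessfulMatch());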
@@ -3177,66 +3162,51 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
template <typename ResultSeqString>
MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Isolate* isolate,
- String* subject,
- JSRegExp* regexp,
- JSArray* last_match_info) {
+ Handle<String> subject,
+ Handle<JSRegExp> regexp,
+ Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
- HandleScope handles(isolate);
-
- Handle<String> subject_handle(subject);
- Handle<JSRegExp> regexp_handle(regexp);
- Handle<JSArray> last_match_info_handle(last_match_info);
+ bool is_global = regexp->GetFlags().is_global();
// Shortcut for simple non-regexp global replacements.
- if (regexp_handle->GetFlags().is_global() &&
- regexp_handle->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string_handle(HEAP->empty_string());
- if (subject_handle->HasOnlyAsciiChars()) {
+ if (is_global &&
+ regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> empty_string(HEAP->empty_string());
+ if (subject->HasOnlyAsciiChars()) {
return StringReplaceAtomRegExpWithString<SeqAsciiString>(
isolate,
- subject_handle,
- regexp_handle,
- empty_string_handle,
- last_match_info_handle);
+ subject,
+ regexp,
+ empty_string,
+ last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
isolate,
- subject_handle,
- regexp_handle,
- empty_string_handle,
- last_match_info_handle);
+ subject,
+ regexp,
+ empty_string,
+ last_match_info);
}
}
- Handle<Object> match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- 0,
- last_match_info_handle);
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) return *subject_handle;
-
- ASSERT(last_match_info_handle->HasFastElements());
-
- int start, end;
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
+ RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) {
+ if (global_cache.HasException()) return Failure::Exception();
+ return *subject;
}
- bool global = regexp_handle->GetFlags().is_global();
+ int start = current_match[0];
+ int end = current_match[1];
+ int capture_count = regexp->CaptureCount();
+ int subject_length = subject->length();
- if (start == end && !global) return *subject_handle;
+ int new_length = subject_length - (end - start);
+ if (new_length == 0) return isolate->heap()->empty_string();
- int length = subject_handle->length();
- int new_length = length - (end - start);
- if (new_length == 0) {
- return isolate->heap()->empty_string();
- }
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
@@ -3246,73 +3216,55 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
isolate->factory()->NewRawTwoByteString(new_length));
}
- // If the regexp isn't global, only match once.
- if (!global) {
- if (start > 0) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars(),
- 0,
- start);
- }
- if (end < length) {
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + start,
- end,
- length);
+ if (!is_global) {
+ RegExpImpl::SetLastMatchInfo(
+ last_match_info, subject, capture_count, current_match);
+ if (start == end) {
+ return *subject;
+ } else {
+ if (start > 0) {
+ String::WriteToFlat(*subject, answer->GetChars(), 0, start);
+ }
+ if (end < subject_length) {
+ String::WriteToFlat(
+ *subject, answer->GetChars() + start, end, subject_length);
+ }
+ return *answer;
}
- return *answer;
}
- int prev = 0; // Index of end of last match.
- int next = 0; // Start of next search (prev unless last match was empty).
+ int prev = 0;
int position = 0;
do {
+ start = current_match[0];
+ end = current_match[1];
if (prev < start) {
// Add substring subject[prev;start] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- start);
+ String::WriteToFlat(
+ *subject, answer->GetChars() + position, prev, start);
position += start - prev;
}
prev = end;
- next = end;
- // Continue from where the match ended, unless it was an empty match.
- if (start == end) {
- next++;
- if (next > length) break;
- }
- match = RegExpImpl::Exec(regexp_handle,
- subject_handle,
- next,
- last_match_info_handle);
- if (match.is_null()) return Failure::Exception();
- if (match->IsNull()) break;
-
- ASSERT(last_match_info_handle->HasFastElements());
- HandleScope loop_scope(isolate);
- {
- AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array =
- FixedArray::cast(last_match_info_handle->elements());
- start = RegExpImpl::GetCapture(match_info_array, 0);
- end = RegExpImpl::GetCapture(match_info_array, 1);
- }
- } while (true);
- if (prev < length) {
+ current_match = global_cache.FetchNext();
+ } while (current_match != NULL);
+
+ if (global_cache.HasException()) return Failure::Exception();
+
+ RegExpImpl::SetLastMatchInfo(last_match_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
+ if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
- String::WriteToFlat(*subject_handle,
- answer->GetChars() + position,
- prev,
- length);
- position += length - prev;
+ String::WriteToFlat(
+ *subject, answer->GetChars() + position, prev, subject_length);
+ position += subject_length - prev;
}
- if (position == 0) {
- return isolate->heap()->empty_string();
- }
+ if (position == 0) return isolate->heap()->empty_string();
// Shorten the string and fill the rest of the allocation.
int string_size = ResultSeqString::SizeFor(position);
@@ -3335,32 +3287,18 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(String, subject, 0);
- if (!subject->IsFlat()) {
- Object* flat_subject;
- { MaybeObject* maybe_flat_subject = subject->TryFlatten();
- if (!maybe_flat_subject->ToObject(&flat_subject)) {
- return maybe_flat_subject;
- }
- }
- subject = String::cast(flat_subject);
- }
+ HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(String, replacement, 2);
- if (!replacement->IsFlat()) {
- Object* flat_replacement;
- { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
- if (!maybe_flat_replacement->ToObject(&flat_replacement)) {
- return maybe_flat_replacement;
- }
- }
- replacement = String::cast(flat_replacement);
- }
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+ if (!subject->IsFlat()) subject = FlattenGetString(subject);
- ASSERT(last_match_info->HasFastElements());
+ if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+
+ ASSERT(last_match_info->HasFastObjectElements());
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
@@ -3372,20 +3310,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
}
}
- return StringReplaceRegExpWithString(isolate,
- subject,
- regexp,
- replacement,
- last_match_info);
+ return StringReplaceRegExpWithString(
+ isolate, subject, regexp, replacement, last_match_info);
}
-Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit) {
+Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
+ Handle<String> subject,
+ Handle<String> search,
+ Handle<String> replace,
+ bool* found,
+ int recursion_limit) {
if (recursion_limit == 0) return Handle<String>::null();
if (subject->IsConsString()) {
ConsString* cons = ConsString::cast(*subject);
@@ -3413,7 +3348,7 @@ Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
return subject;
} else {
- int index = StringMatch(isolate, subject, search, 0);
+ int index = Runtime::StringMatch(isolate, subject, search, 0);
if (index == -1) return subject;
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
@@ -3436,20 +3371,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
// retry with a flattened subject string.
const int kRecursionLimit = 0x1000;
bool found = false;
- Handle<String> result =
- Runtime::StringReplaceOneCharWithString(isolate,
- subject,
- search,
- replace,
- &found,
- kRecursionLimit);
+ Handle<String> result = StringReplaceOneCharWithString(isolate,
+ subject,
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
if (!result.is_null()) return *result;
- return *Runtime::StringReplaceOneCharWithString(isolate,
- FlattenGetString(subject),
- search,
- replace,
- &found,
- kRecursionLimit);
+ return *StringReplaceOneCharWithString(isolate,
+ FlattenGetString(subject),
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
}
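// StringReplaceOneCharWithString recurses over cons-string halves, so a
// deeply unbalanced cons tree could exhaust the stack. The recursion limit
// plus null-handle return handles that, and the runtime function retries
// exactly once on a flattened copy, which needs no recursion. Condensed:

const int kRecursionLimit = 0x1000;
bool found = false;
Handle<String> result = StringReplaceOneCharWithString(
    isolate, subject, search, replace, &found, kRecursionLimit);
if (!result.is_null()) return *result;  // finished within the limit
// Null handle: limit hit somewhere in the cons tree; flatten and retry.
return *StringReplaceOneCharWithString(isolate, FlattenGetString(subject),
                                       search, replace, &found,
                                       kRecursionLimit);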
@@ -3680,8 +3614,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
} else {
CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2I(from_number);
- end = FastD2I(to_number);
+ start = FastD2IChecked(from_number);
+ end = FastD2IChecked(to_number);
}
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
@@ -3699,45 +3633,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
HandleScope handles;
- Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
- if (match.is_null()) {
- return Failure::Exception();
+ int capture_count = regexp->CaptureCount();
+
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_space(zone, DELETE_ON_EXIT);
+ ZoneList<int> offsets(8, zone);
+
+ while (true) {
+ int32_t* match = global_cache.FetchNext();
+ if (match == NULL) break;
+ offsets.Add(match[0], zone); // start
+ offsets.Add(match[1], zone); // end
}
- if (match->IsNull()) {
+
+ if (global_cache.HasException()) return Failure::Exception();
+
+ if (offsets.length() == 0) {
+ // Not a single match.
return isolate->heap()->null_value();
}
- int length = subject->length();
- ZoneScope zone_space(isolate, DELETE_ON_EXIT);
- ZoneList<int> offsets(8);
- int start;
- int end;
- do {
- {
- AssertNoAllocation no_alloc;
- FixedArray* elements = FixedArray::cast(regexp_info->elements());
- start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
- end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
- }
- offsets.Add(start);
- offsets.Add(end);
- if (start == end) if (++end > length) break;
- match = RegExpImpl::Exec(regexp, subject, end, regexp_info);
- if (match.is_null()) {
- return Failure::Exception();
- }
- } while (!match->IsNull());
+ RegExpImpl::SetLastMatchInfo(regexp_info,
+ subject,
+ capture_count,
+ global_cache.LastSuccessfulMatch());
+
int matches = offsets.length() / 2;
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- Handle<String> substring = isolate->factory()->
- NewSubString(subject, offsets.at(0), offsets.at(1));
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
elements->set(0, *substring);
- for (int i = 1; i < matches ; i++) {
+ for (int i = 1; i < matches; i++) {
+ HandleScope temp_scope(isolate);
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- Handle<String> substring = isolate->factory()->
- NewProperSubString(subject, from, to);
+ Handle<String> substring =
+ isolate->factory()->NewProperSubString(subject, from, to);
elements->set(i, *substring);
}
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
@@ -3746,269 +3680,154 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
}
-static bool SearchStringMultiple(Isolate* isolate,
- Handle<String> subject,
- Handle<String> pattern,
- Handle<JSArray> last_match_info,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- ASSERT(pattern->IsFlat());
-
- // Treating as if a previous match was before first character.
- int match_pos = -pattern->length();
-
- for (;;) { // Break when search complete.
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- AssertNoAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent();
- String::FlatContent pattern_content = pattern->GetFlatContent();
- if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
- if (pattern_content.IsAscii()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- } else {
- Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern_content.IsAscii()) {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- *pattern,
- builder,
- &match_pos)) break;
- } else {
- if (SearchStringMultiple(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- *pattern,
- builder,
- &match_pos)) break;
- }
- }
- }
-
- if (match_pos >= 0) {
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- match_pos,
- match_pos + pattern->length());
- return true;
- }
- return false; // No matches at all.
-}
-
-
-static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
+// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
+// separate last match info. See comment on that function.
+template<bool has_capture>
+static MaybeObject* SearchRegExpMultiple(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
+ Handle<JSArray> result_array) {
ASSERT(subject->IsFlat());
- int match_start = -1;
- int match_end = 0;
- int pos = 0;
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+ ASSERT_NE(has_capture, regexp->CaptureCount() == 0);
- OffsetsVector registers(required_registers, isolate);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
+ int capture_count = regexp->CaptureCount();
int subject_length = subject->length();
- bool first = true;
- for (;;) { // Break on failure, return on exception.
- RegExpImpl::IrregexpResult result =
- RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- pos,
- register_vector);
- if (result == RegExpImpl::RE_SUCCESS) {
- match_start = register_vector[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
- }
- match_end = register_vector[1];
- HandleScope loop_scope(isolate);
- if (!first) {
- builder->Add(*isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end));
- } else {
- builder->Add(*isolate->factory()->NewSubString(subject,
- match_start,
- match_end));
- }
- if (match_start != match_end) {
- pos = match_end;
- } else {
- pos = match_end + 1;
- if (pos > subject_length) break;
- }
- } else if (result == RegExpImpl::RE_FAILURE) {
- break;
- } else {
- ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
- return result;
- }
- first = false;
- }
+ static const int kMinLengthToCache = 0x1000;
- if (match_start >= 0) {
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
+ if (subject_length > kMinLengthToCache) {
+ Handle<Object> cached_answer(RegExpResultsCache::Lookup(
+ isolate->heap(),
+ *subject,
+ regexp->data(),
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES));
+ if (*cached_answer != Smi::FromInt(0)) {
+ Handle<FixedArray> cached_fixed_array =
+ Handle<FixedArray>(FixedArray::cast(*cached_answer));
+ // The cache FixedArray is a COW-array and can therefore be reused.
+ isolate->factory()->SetContent(result_array, cached_fixed_array);
+ // The actual length of the result array is stored in the last element of
+ // the backing store (the backing FixedArray may have a larger capacity).
+ Object* cached_fixed_array_last_element =
+ cached_fixed_array->get(cached_fixed_array->length() - 1);
+ Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
+ result_array->set_length(js_array_length);
+ RegExpImpl::SetLastMatchInfo(
+ last_match_array, subject, capture_count, NULL);
+ return *result_array;
}
- SetLastMatchInfoNoCaptures(subject,
- last_match_array,
- match_start,
- match_end);
- return RegExpImpl::RE_SUCCESS;
- } else {
- return RegExpImpl::RE_FAILURE; // No matches at all.
}
-}
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ if (global_cache.HasException()) return Failure::Exception();
-// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
-// separate last match info. See comment on that function.
-static RegExpImpl::IrregexpResult SearchRegExpMultiple(
- Isolate* isolate,
- Handle<String> subject,
- Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
- FixedArrayBuilder* builder) {
-
- ASSERT(subject->IsFlat());
- int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
- if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
-
- OffsetsVector registers(required_registers, isolate);
- Vector<int32_t> register_vector(registers.vector(), registers.length());
-
- RegExpImpl::IrregexpResult result =
- RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- 0,
- register_vector);
+ Handle<FixedArray> result_elements;
+ if (result_array->HasFastObjectElements()) {
+ result_elements =
+ Handle<FixedArray>(FixedArray::cast(result_array->elements()));
+ }
+ if (result_elements.is_null() || result_elements->length() < 16) {
+ result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
+ }
- int capture_count = regexp->CaptureCount();
- int subject_length = subject->length();
+ FixedArrayBuilder builder(result_elements);
// Bounds of the most recent match; match_start stays -1 until one is found.
- int pos = 0;
- // End of previous match. Differs from pos if match was empty.
+ int match_start = -1;
int match_end = 0;
- if (result == RegExpImpl::RE_SUCCESS) {
- bool first = true;
- do {
- int match_start = register_vector[0];
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- if (match_end < match_start) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- match_start);
+ bool first = true;
+
+ // Two smis before and after the match, for very long strings.
+ static const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+ while (true) {
+ int32_t* current_match = global_cache.FetchNext();
+ if (current_match == NULL) break;
+ match_start = current_match[0];
+ builder.EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+ if (match_end < match_start) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder,
+ match_end,
+ match_start);
+ }
+ match_end = current_match[1];
+ {
+ // Avoid accumulating new handles inside loop.
+ HandleScope temp_scope(isolate);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end);
+ } else {
+ match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
+ first = false;
}
- match_end = register_vector[1];
- {
- // Avoid accumulating new handles inside loop.
- HandleScope temp_scope(isolate);
+ if (has_capture) {
// Arguments array to replace function is match, captures, index and
// subject, i.e., 3 + capture count in total.
Handle<FixedArray> elements =
isolate->factory()->NewFixedArray(3 + capture_count);
- Handle<String> match;
- if (!first) {
- match = isolate->factory()->NewProperSubString(subject,
- match_start,
- match_end);
- } else {
- match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
- }
+
elements->set(0, *match);
for (int i = 1; i <= capture_count; i++) {
- int start = register_vector[i * 2];
+ int start = current_match[i * 2];
if (start >= 0) {
- int end = register_vector[i * 2 + 1];
+ int end = current_match[i * 2 + 1];
ASSERT(start <= end);
- Handle<String> substring;
- if (!first) {
- substring = isolate->factory()->NewProperSubString(subject,
- start,
- end);
- } else {
- substring = isolate->factory()->NewSubString(subject, start, end);
- }
+ Handle<String> substring =
+ isolate->factory()->NewSubString(subject, start, end);
elements->set(i, *substring);
} else {
- ASSERT(register_vector[i * 2 + 1] < 0);
+ ASSERT(current_match[i * 2 + 1] < 0);
elements->set(i, isolate->heap()->undefined_value());
}
}
elements->set(capture_count + 1, Smi::FromInt(match_start));
elements->set(capture_count + 2, *subject);
- builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
- }
-
- if (match_end > match_start) {
- pos = match_end;
+ builder.Add(*isolate->factory()->NewJSArrayWithElements(elements));
} else {
- pos = match_end + 1;
- if (pos > subject_length) {
- break;
- }
+ builder.Add(*match);
}
+ }
+ }
- result = RegExpImpl::IrregexpExecOnce(regexp,
- subject,
- pos,
- register_vector);
- first = false;
- } while (result == RegExpImpl::RE_SUCCESS);
-
- if (result != RegExpImpl::RE_EXCEPTION) {
- // Finished matching, with at least one match.
- if (match_end < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder,
- match_end,
- subject_length);
- }
+ if (global_cache.HasException()) return Failure::Exception();
- int last_match_capture_count = (capture_count + 1) * 2;
- int last_match_array_size =
- last_match_capture_count + RegExpImpl::kLastMatchOverhead;
- last_match_array->EnsureSize(last_match_array_size);
- AssertNoAllocation no_gc;
- FixedArray* elements = FixedArray::cast(last_match_array->elements());
- // We have to set this even though the rest of the last match array is
- // ignored.
- RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
- // These are also read without consulting the override.
- RegExpImpl::SetLastSubject(elements, *subject);
- RegExpImpl::SetLastInput(elements, *subject);
- return RegExpImpl::RE_SUCCESS;
- }
- }
- // No matches at all, return failure or exception result directly.
- return result;
+ if (match_start >= 0) {
+ // Finished matching, with at least one match.
+ if (match_end < subject_length) {
+ ReplacementStringBuilder::AddSubjectSlice(&builder,
+ match_end,
+ subject_length);
+ }
+
+ RegExpImpl::SetLastMatchInfo(
+ last_match_array, subject, capture_count, NULL);
+
+ if (subject_length > kMinLengthToCache) {
+ // Store the length of the result array into the last element of the
+ // backing FixedArray.
+ builder.EnsureCapacity(1);
+ Handle<FixedArray> fixed_array = builder.array();
+ fixed_array->set(fixed_array->length() - 1,
+ Smi::FromInt(builder.length()));
+ // Cache the result and turn the FixedArray into a COW array.
+ RegExpResultsCache::Enter(isolate->heap(),
+ *subject,
+ regexp->data(),
+ *fixed_array,
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ }
+ return *builder.ToJSArray(result_array);
+ } else {
+ return isolate->heap()->null_value(); // No matches at all.
+ }
}
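// The REGEXP_MULTIPLE_INDICES cache stores a FixedArray whose capacity may
// exceed the logical result length, so the length is smuggled into the final
// backing-store slot. Both halves of that convention, condensed from the
// function above:

// Store side: record the logical length, then cache the COW array.
builder.EnsureCapacity(1);
Handle<FixedArray> fixed_array = builder.array();
fixed_array->set(fixed_array->length() - 1, Smi::FromInt(builder.length()));

// Lookup side: reuse the cached COW array directly and restore the JSArray
// length from that same final slot.
isolate->factory()->SetContent(result_array, cached_fixed_array);
result_array->set_length(Smi::cast(
    cached_fixed_array->get(cached_fixed_array->length() - 1)));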
@@ -4025,49 +3844,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- ASSERT(last_match_info->HasFastElements());
+ ASSERT(last_match_info->HasFastObjectElements());
ASSERT(regexp->GetFlags().is_global());
- Handle<FixedArray> result_elements;
- if (result_array->HasFastElements()) {
- result_elements =
- Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- }
- if (result_elements.is_null() || result_elements->length() < 16) {
- result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
- }
- FixedArrayBuilder builder(result_elements);
- if (regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> pattern(
- String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
- ASSERT(pattern->IsFlat());
- if (SearchStringMultiple(isolate, subject, pattern,
- last_match_info, &builder)) {
- return *builder.ToJSArray(result_array);
- }
- return isolate->heap()->null_value();
- }
-
- ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-
- RegExpImpl::IrregexpResult result;
if (regexp->CaptureCount() == 0) {
- result = SearchRegExpNoCaptureMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
+ return SearchRegExpMultiple<false>(
+ isolate, subject, regexp, last_match_info, result_array);
} else {
- result = SearchRegExpMultiple(isolate,
- subject,
- regexp,
- last_match_info,
- &builder);
+ return SearchRegExpMultiple<true>(
+ isolate, subject, regexp, last_match_info, result_array);
}
- if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
- if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
- ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
- return Failure::Exception();
}
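// Making has_capture a template parameter lets the compiler drop the untaken
// branch from each instantiation instead of testing a runtime bool on every
// match. A standalone illustration of the pattern (not V8 code):

#include <stdint.h>

template <bool kHasCapture>
static int SumRegisters(const int32_t* match, int capture_count) {
  int sum = match[0] + match[1];  // whole-match bounds, always present
  if (kHasCapture) {              // compile-time constant: this branch is
    for (int i = 1; i <= capture_count; i++) {  // absent from <false>
      sum += match[2 * i];
    }
  }
  return sum;
}

// Dispatch once, outside the hot loop:
//   caps == 0 ? SumRegisters<false>(m, 0) : SumRegisters<true>(m, caps);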
@@ -4122,7 +3908,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
MaybeObject* res =
@@ -4147,7 +3933,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
MaybeObject* res =
@@ -4172,7 +3958,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
return *isolate->factory()->infinity_symbol();
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2I(f_number);
+ int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
MaybeObject* res =
@@ -4308,7 +4094,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// appropriate.
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
- if (result.IsFound() && result.type() == FIELD) {
+ if (result.IsField()) {
int offset = result.GetFieldIndex();
keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
@@ -4330,17 +4116,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// JSObject without a string key. If the key is a Smi, check for a
// definite out-of-bounds access to elements, which is a strong indicator
// that subsequent accesses will also call the runtime. Proactively
- // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+ // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
// doubles for those future calls in the case that the elements would
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_DOUBLE_ELEMENTS) {
+ if (IsFastElementsKind(elements_kind) &&
+ !IsFastObjectElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
MaybeObject* maybe_object = TransitionElements(js_object,
- FAST_ELEMENTS,
+ elements_kind,
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
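
// The holey/packed selection applied above, factored into a sketch helper
// for clarity (the helper itself is hypothetical; the predicate and the
// kind constants are the ones used in the patch). Holeyness is preserved
// so the transition never drops the "may contain holes" bit:
static ElementsKind FastObjectKindFor(ElementsKind kind) {
  return IsFastHoleyElementsKind(kind) ? FAST_HOLEY_ELEMENTS
                                       : FAST_ELEMENTS;
}
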
@@ -4415,7 +4206,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
js_object->LocalLookupRealNamedProperty(*name, &result);
// Special case for callback properties.
- if (result.IsFound() && result.type() == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
Object* callback = result.GetCallbackObject();
// To be compatible with Safari we do not change the value on API objects
// in Object.defineProperty(). Firefox disagrees here, and actually changes
@@ -4442,8 +4233,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
- if (result.IsProperty() &&
- (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
+ if (result.IsFound() &&
+ (attr != result.GetAttributes() || result.IsPropertyCallbacks())) {
// New attributes - normalize to avoid writing to instance descriptor
if (js_object->IsJSGlobalProxy()) {
// Since the result is a property, the prototype will exist so
@@ -4466,6 +4257,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
}
+// Return a property value without being observable by accessors or interceptors.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
+ LookupResult lookup(isolate);
+ object->LookupRealNamedProperty(*key, &lookup);
+ if (!lookup.IsFound()) return isolate->heap()->undefined_value();
+ switch (lookup.type()) {
+ case NORMAL:
+ return lookup.holder()->GetNormalizedProperty(&lookup);
+ case FIELD:
+ return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+ case CONSTANT_FUNCTION:
+ return lookup.GetConstantFunction();
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ return isolate->heap()->undefined_value();
+ case NONEXISTENT:
+ UNREACHABLE();
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
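// Hedged usage note for the new Runtime_GetDataProperty: assuming it is
// registered in the runtime function list like its neighbours, it is
// reachable from JavaScript as %GetDataProperty(obj, "key") under
// --allow-natives-syntax. Accessors and interceptors on obj are
// deliberately not triggered; only NORMAL, FIELD and CONSTANT_FUNCTION
// properties produce a value, everything else yields undefined.
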
MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -4510,8 +4328,10 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return *value;
}
+ js_object->ValidateElements();
Handle<Object> result = JSObject::SetElement(
js_object, index, value, attr, strict_mode, set_mode);
+ js_object->ValidateElements();
if (result.is_null()) return Failure::Exception();
return *value;
}
@@ -4669,7 +4489,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+ ElementsKind new_kind = js_object->HasFastHoleyElements()
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ return TransitionElements(object, new_kind, isolate);
+ } else {
+ return *object;
+ }
}
@@ -4677,7 +4505,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
- return TransitionElements(object, FAST_ELEMENTS, isolate);
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+ ElementsKind new_kind = js_object->HasFastHoleyElements()
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ return TransitionElements(object, new_kind, isolate);
+ } else {
+ return *object;
+ }
}
@@ -4708,32 +4544,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
HandleScope scope;
Object* raw_boilerplate_object = literals->get(literal_index);
- Handle<JSArray> boilerplate(JSArray::cast(raw_boilerplate_object));
-#if DEBUG
+ Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
ElementsKind elements_kind = object->GetElementsKind();
-#endif
- ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+ ASSERT(IsFastElementsKind(elements_kind));
// Smis should never trigger transitions.
ASSERT(!value->IsSmi());
if (value->IsNumber()) {
- ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
- JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
- if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
- FAST_DOUBLE_ELEMENTS)) {
- JSObject::TransitionElementsKind(boilerplate, FAST_DOUBLE_ELEMENTS);
- }
- ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ ASSERT(IsFastSmiElementsKind(elements_kind));
+ ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ if (IsMoreGeneralElementsKindTransition(
+ boilerplate_object->GetElementsKind(),
+ transitioned_kind)) {
+ JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
+ }
+ JSObject::TransitionElementsKind(object, transitioned_kind);
+ ASSERT(IsFastDoubleElementsKind(object->GetElementsKind()));
FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
HeapNumber* number = HeapNumber::cast(*value);
double_array->set(store_index, number->Number());
} else {
- ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_DOUBLE_ELEMENTS);
- JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
- if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(),
- FAST_ELEMENTS)) {
- JSObject::TransitionElementsKind(boilerplate, FAST_ELEMENTS);
+ ASSERT(IsFastSmiElementsKind(elements_kind) ||
+ IsFastDoubleElementsKind(elements_kind));
+ ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ JSObject::TransitionElementsKind(object, transitioned_kind);
+ if (IsMoreGeneralElementsKindTransition(
+ boilerplate_object->GetElementsKind(),
+ transitioned_kind)) {
+ JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
}
FixedArray* object_array = FixedArray::cast(object->elements());
object_array->set(store_index, *value);
@@ -4745,30 +4587,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
// Check whether the debugger is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
- if (!isolate->IsDebuggerActive()) return isolate->heap()->false_value();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
+ return isolate->heap()->false_value();
+ }
CONVERT_ARG_CHECKED(Object, callback, 0);
// We do not step into the callback if it's a builtin or not even a function.
if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) {
return isolate->heap()->false_value();
}
return isolate->heap()->true_value();
+#else
+ return isolate->heap()->false_value();
+#endif // ENABLE_DEBUGGER_SUPPORT
}
// Set one shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
- if (!debug->IsStepping()) return NULL;
- CONVERT_ARG_CHECKED(Object, callback, 0);
+ if (!debug->IsStepping()) return isolate->heap()->undefined_value();
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
HandleScope scope(isolate);
- Handle<SharedFunctionInfo> shared_info(JSFunction::cast(callback)->shared());
// When leaving the callback, step out has been activated, but it is not
// performed if we do not leave the builtin. To be able to step into the
// callback again, we need to clear the step out at this point.
debug->ClearStepOut();
- debug->FloodWithOneShot(shared_info);
- return NULL;
+ debug->FloodWithOneShot(callback);
+#endif // ENABLE_DEBUGGER_SUPPORT
+ return isolate->heap()->undefined_value();
}
@@ -4896,7 +4745,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
uint32_t index;
if (key->AsArrayIndex(&index)) {
- JSObject::LocalElementType type = object->HasLocalElement(index);
+ JSObject::LocalElementType type = object->GetLocalElementType(index);
switch (type) {
case JSObject::UNDEFINED_ELEMENT:
case JSObject::STRING_CHARACTER_ELEMENT:
@@ -5248,15 +5097,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
- ASSERT(args.length() == 1);
- Object* obj = args[0];
- return (obj->IsJSObject() && !obj->IsJSGlobalProxy())
- ? JSObject::cast(obj)->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0)
- : obj;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -5946,7 +5786,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArray, array, 0);
- if (!array->HasFastElements()) return isolate->heap()->undefined_value();
+ if (!array->HasFastObjectElements()) {
+ return isolate->heap()->undefined_value();
+ }
FixedArray* elements = FixedArray::cast(array->elements());
int n = elements->length();
bool ascii = true;
@@ -5983,6 +5825,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
+ ASSERT(args.length() == 1);
+ HandleScope scope(isolate);
+ BasicJsonStringifier stringifier(isolate);
+ return stringifier.Stringify(Handle<Object>(args[0]));
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
NoHandleAllocation ha;
@@ -6346,11 +6196,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
- Handle<Object> cached_answer(StringSplitCache::Lookup(
- isolate->heap()->string_split_cache(),
+ Handle<Object> cached_answer(RegExpResultsCache::Lookup(
+ isolate->heap(),
*subject,
- *pattern));
+ *pattern,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS));
if (*cached_answer != Smi::FromInt(0)) {
+ // The cache FixedArray is a COW-array and can therefore be reused.
Handle<JSArray> result =
isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(cached_answer));
@@ -6366,17 +6218,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
static const int kMaxInitialListCapacity = 16;
- ZoneScope scope(isolate, DELETE_ON_EXIT);
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope scope(zone, DELETE_ON_EXIT);
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity);
+ ZoneList<int> indices(initial_capacity, zone);
if (!pattern->IsFlat()) FlattenString(pattern);
- FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit);
+ FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit, zone);
if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length);
+ indices.Add(subject_length, zone);
}
// The list indices now contains the end of each part to create.
@@ -6389,7 +6242,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
- ASSERT(result->HasFastElements());
+ ASSERT(result->HasFastObjectElements());
if (part_count == 1 && indices.at(0) == subject_length) {
FixedArray::cast(result->elements())->set(0, *subject);
@@ -6408,12 +6261,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
}
if (limit == 0xffffffffu) {
- if (result->HasFastElements()) {
- StringSplitCache::Enter(isolate->heap(),
- isolate->heap()->string_split_cache(),
- *subject,
- *pattern,
- *elements);
+ if (result->HasFastObjectElements()) {
+ RegExpResultsCache::Enter(isolate->heap(),
+ *subject,
+ *pattern,
+ *elements,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
}
}
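
// The two call sites above, together with the REGEXP_MULTIPLE_INDICES site
// earlier in this patch, suggest the shape of the unified cache replacing
// the dedicated string-split cache. Paraphrased here as a sketch, not
// copied from the header; a miss is signalled by Smi::FromInt(0), which is
// exactly what the *cached_answer != Smi::FromInt(0) check above tests.
// class RegExpResultsCache {
//  public:
//   enum ResultsCacheType { REGEXP_MULTIPLE_INDICES,
//                           STRING_SPLIT_SUBSTRINGS };
//   static Object* Lookup(Heap* heap, String* key_string,
//                         Object* key_pattern, ResultsCacheType type);
//   static void Enter(Heap* heap, String* key_string, Object* key_pattern,
//                     FixedArray* value_array, ResultsCacheType type);
// };
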
@@ -6765,7 +6618,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (maybe_result->IsFailure()) return maybe_result;
int special_length = special->length();
- if (!array->HasFastElements()) {
+ if (!array->HasFastObjectElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
@@ -6875,7 +6728,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
int array_length = args.smi_at(1);
CONVERT_ARG_CHECKED(String, separator, 2);
- if (!array->HasFastElements()) {
+ if (!array->HasFastObjectElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
@@ -6992,8 +6845,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
- RUNTIME_ASSERT(elements_array->HasFastElements() ||
- elements_array->HasFastSmiOnlyElements());
+ RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
CONVERT_ARG_CHECKED(String, separator, 2);
// elements_array is a fast-mode JSArray of alternating positions
@@ -7703,8 +7555,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
isolate->heap()->non_strict_arguments_elements_map());
Handle<Map> old_map(result->map());
- Handle<Map> new_map =
- isolate->factory()->CopyMapDropTransitions(old_map);
+ Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*new_map);
@@ -8061,17 +7912,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
// instead of a new JSFunction object. This way, errors are
// reported the same way whether or not 'Function' is called
// using 'new'.
- return isolate->context()->global();
+ return isolate->context()->global_object();
}
}
// The function should be compiled for the optimization hints to be
- // available. We cannot use EnsureCompiled because that forces a
- // compilation through the shared function info which makes it
- // impossible for us to optimize.
- if (!function->is_compiled()) {
- JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
- }
+ // available.
+ JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!function->has_initial_map() &&
@@ -8149,7 +7996,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
// If the function is not optimizable or debugger is active continue using the
// code from the full compiler.
- if (!function->shared()->code()->optimizable() ||
+ if (!FLAG_crankshaft ||
+ !function->shared()->code()->optimizable() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
@@ -8163,7 +8011,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
function->shared()->code()->set_profiler_ticks(0);
if (JSFunction::CompileOptimized(function,
- AstNode::kNoNumber,
+ BailoutId::None(),
CLEAR_EXCEPTION)) {
return function->code();
}
@@ -8177,6 +8025,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation);
+ Compiler::RecompileParallel(args.at<JSFunction>(0));
+ return *isolate->factory()->undefined_value();
+}
+
+
class ActivationsFinder : public ThreadVisitor {
public:
explicit ActivationsFinder(JSFunction* function)
@@ -8202,35 +8058,6 @@ class ActivationsFinder : public ThreadVisitor {
};
-static void MaterializeArgumentsObjectInFrame(Isolate* isolate,
- JavaScriptFrame* frame) {
- Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- Handle<Object> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate->heap()->arguments_marker()) {
- if (arguments.is_null()) {
- // FunctionGetArguments can't throw an exception, so cast away the
- // doubt with an assert.
- arguments = Handle<Object>(
- Accessors::FunctionGetArguments(*function,
- NULL)->ToObjectUnchecked());
- ASSERT(*arguments != isolate->heap()->null_value());
- ASSERT(*arguments != isolate->heap()->undefined_value());
- }
- frame->SetExpression(i, *arguments);
- if (FLAG_trace_deopt) {
- PrintF("Materializing arguments object for frame %p - %p: %p ",
- reinterpret_cast<void*>(frame->sp()),
- reinterpret_cast<void*>(frame->fp()),
- reinterpret_cast<void*>(*arguments));
- arguments->ShortPrint();
- PrintF("\n");
- }
- }
- }
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8239,25 +8066,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
- int jsframes = deoptimizer->jsframe_count();
+ JavaScriptFrameIterator it(isolate);
- deoptimizer->MaterializeHeapNumbers();
+ // Make sure to materialize objects before causing any allocation.
+ deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
- JavaScriptFrameIterator it(isolate);
- for (int i = 0; i < jsframes - 1; i++) {
- MaterializeArgumentsObjectInFrame(isolate, it.frame());
- it.Advance();
- }
-
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
- MaterializeArgumentsObjectInFrame(isolate, frame);
-
- if (type == Deoptimizer::EAGER) {
- RUNTIME_ASSERT(function->IsOptimized());
- }
+ RUNTIME_ASSERT(type != Deoptimizer::EAGER || function->IsOptimized());
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
@@ -8265,11 +8083,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Find other optimized activations of the function.
+ // Find other optimized activations of the function or functions that
+ // share the same optimized code.
bool has_other_activations = false;
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) {
+ JSFunction* other_function = JSFunction::cast(frame->function());
+ if (frame->is_optimized() && other_function->code() == function->code()) {
has_other_activations = true;
break;
}
@@ -8292,6 +8112,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
} else {
Deoptimizer::DeoptimizeFunction(*function);
}
+ // Flush optimized code cache for this function.
+ function->shared()->ClearOptimizedCodeMap();
+
return isolate->heap()->undefined_value();
}
@@ -8368,6 +8191,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
return Smi::FromInt(4); // 4 == "never".
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (FLAG_parallel_recompilation) {
+ if (function->IsMarkedForLazyRecompilation()) {
+ return Smi::FromInt(5);
+ }
+ }
if (FLAG_always_opt) {
// --always-opt may be set, but that is more of a best effort than a real
// promise, so we still say "no" if the function is not optimized.
@@ -8414,7 +8242,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
}
}
- int ast_id = AstNode::kNoNumber;
+ BailoutId ast_id = BailoutId::None();
if (succeeded) {
// The top JS function is this one, the PC is somewhere in the
// unoptimized code.
@@ -8435,14 +8263,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
// Table entries are (AST id, pc offset) pairs.
uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
if (pc_offset == target_pc_offset) {
- ast_id = static_cast<int>(Memory::uint32_at(table_cursor));
+ ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
break;
}
table_cursor += 2 * kIntSize;
}
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id);
+ PrintF("[replacing on-stack at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
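
// Sketch of the BailoutId pattern this hunk switches to: a thin typed
// wrapper so AST ids can no longer be confused with arbitrary ints such as
// pc offsets. Simplified and renamed; the real class lives elsewhere in
// the tree and offers more named constructors.
class BailoutIdSketch {
 public:
  explicit BailoutIdSketch(int id) : id_(id) {}
  static BailoutIdSketch None() { return BailoutIdSketch(-1); }
  bool IsNone() const { return id_ == -1; }
  int ToInt() const { ASSERT(!IsNone()); return id_; }
  bool operator==(const BailoutIdSketch& other) const {
    return id_ == other.id_;
  }
 private:
  int id_;
};
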
@@ -8459,7 +8287,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF("[on-stack replacement offset %d in optimized code]\n",
data->OsrPcOffset()->value());
}
- ASSERT(data->OsrAstId()->value() == ast_id);
+ ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
} else {
// We may never generate the desired OSR entry if we emit an
// early deoptimize.
@@ -8498,7 +8326,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
// frame to an optimized one.
if (succeeded) {
ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id);
+ return Smi::FromInt(ast_id.ToInt());
} else {
if (function->IsMarkedForLazyRecompilation()) {
function->ReplaceCode(function->shared()->code());
@@ -8607,6 +8435,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 1);
+ Context* result;
+ MaybeObject* maybe_result =
+ isolate->heap()->AllocateGlobalContext(function, scope_info);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ ASSERT(function->context() == isolate->context());
+ ASSERT(function->context()->global_object() == result->global_object());
+ isolate->set_context(result);
+ result->global_object()->set_global_context(result);
+
+ return result; // non-failure
+}
+
+
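// A note on naming in this patch: what used to be the "global context"
// (the context holding the builtins and the canonical empty function) is
// now called the "native context", while "global context" is reused for
// the new per-global-scope context allocated by Runtime_NewGlobalContext
// above. The many global_context() -> native_context() call-site changes
// below follow that split.
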
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -8615,14 +8463,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
SharedFunctionInfo* shared = function->shared();
// TODO: The QML mode should be checked in the ContextLength function.
int length = shared->scope_info()->ContextLength(shared->qml_mode());
+ Context* result;
+ MaybeObject* maybe_result =
+ isolate->heap()->AllocateFunctionContext(length, function);
+ if (!maybe_result->To(&result)) return maybe_result;
- Object* result;
- { MaybeObject* maybe_result =
- isolate->heap()->AllocateFunctionContext(length, function);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- isolate->set_context(Context::cast(result));
+ isolate->set_context(result);
return result; // non-failure
}
@@ -8655,8 +8501,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[1]);
}
@@ -8681,8 +8527,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
if (args[2]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[2]);
}
@@ -8706,8 +8552,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
if (args[1]->IsSmi()) {
// A smi sentinel indicates a context nested inside global code rather
// than some function. There is a canonical empty function that can be
- // gotten from the global context.
- function = isolate->context()->global_context()->closure();
+ // gotten from the native context.
+ function = isolate->context()->native_context()->closure();
} else {
function = JSFunction::cast(args[1]);
}
@@ -8722,19 +8568,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ return isolate->heap()->ToBoolean(obj->IsJSModule());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 1);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
- Context* context;
- MaybeObject* maybe_context =
- isolate->heap()->AllocateModuleContext(isolate->context(),
- scope_info);
- if (!maybe_context->To(&context)) return maybe_context;
- // Also initialize the context slot of the instance object.
- instance->set_context(context);
+ Context* context = Context::cast(instance->context());
+ Context* previous = isolate->context();
+ ASSERT(context->IsModuleContext());
+ // Initialize the context links.
+ context->set_previous(previous);
+ context->set_closure(previous->closure());
+ context->set_global_object(previous->global_object());
isolate->set_context(context);
return context;
@@ -8820,7 +8672,7 @@ static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
Context* top = isolate->context();
// Get the context extension function.
JSFunction* context_extension_function =
- top->global_context()->context_extension_function();
+ top->native_context()->context_extension_function();
// If the holder isn't a context extension object, we just return it
// as the receiver. This allows arguments objects to be used as
// receivers, but only if they are put in the context scope chain
@@ -8999,7 +8851,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
// In non-strict mode, the property is added to the global object.
attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global());
+ object = Handle<JSObject>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
@@ -9053,6 +8905,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_date_object", HandleVector<Object>(NULL, 0)));
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
ASSERT(args.length() == 0);
@@ -9180,7 +9041,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
MaybeObject* maybe_result_array =
output->EnsureCanContainHeapObjectElements();
if (maybe_result_array->IsFailure()) return maybe_result_array;
- RUNTIME_ASSERT(output->HasFastElements());
+ RUNTIME_ASSERT(output->HasFastObjectElements());
AssertNoAllocation no_allocation;
@@ -9242,13 +9103,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Zone* zone = isolate->runtime_zone();
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
if (source->IsSeqAsciiString()) {
- result = JsonParser<true>::Parse(source);
+ result = JsonParser<true>::Parse(source, zone);
} else {
- result = JsonParser<false>::Parse(source);
+ result = JsonParser<false>::Parse(source, zone);
}
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
@@ -9281,18 +9143,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- // Extract global context.
- Handle<Context> context(isolate->context()->global_context());
+ // Extract native context.
+ Handle<Context> context(isolate->context()->native_context());
- // Check if global context allows code generation from
+ // Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
if (context->allow_code_gen_from_strings()->IsFalse() &&
!CodeGenerationFromStringsAllowed(isolate, context)) {
- return isolate->Throw(*isolate->factory()->NewError(
- "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+ Handle<Object> error_message =
+ context->ErrorMessageForCodeGenerationFromStrings();
+ return isolate->Throw(*isolate->factory()->NewEvalError(
+ "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
}
- // Compile source string in the global context.
+ // Compile source string in the native context.
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition, false);
if (shared.is_null()) return Failure::Exception();
@@ -9311,14 +9175,16 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
int scope_position,
bool qml_mode) {
Handle<Context> context = Handle<Context>(isolate->context());
- Handle<Context> global_context = Handle<Context>(context->global_context());
+ Handle<Context> native_context = Handle<Context>(context->native_context());
- // Check if global context allows code generation from
+ // Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (global_context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, global_context)) {
- isolate->Throw(*isolate->factory()->NewError(
- "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+ if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ Handle<Object> error_message =
+ native_context->ErrorMessageForCodeGenerationFromStrings();
+ isolate->Throw(*isolate->factory()->NewEvalError(
+ "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
return MakePair(Failure::Exception(), NULL);
}
@@ -9327,7 +9193,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
source,
Handle<Context>(isolate->context()),
- context->IsGlobalContext(),
+ context->IsNativeContext(),
language_mode,
scope_position,
qml_mode);
@@ -9350,7 +9216,7 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
// (And even if it is, if the first argument isn't a string, just let
// execution default to an indirect call to eval, which will also return
// the first argument without doing anything).
- if (*callee != isolate->global_context()->global_eval_fun() ||
+ if (*callee != isolate->native_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, isolate->heap()->the_hole_value());
}
@@ -9414,8 +9280,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSObject, element, 1);
- RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
+ CONVERT_ARG_CHECKED(JSReceiver, element, 1);
+ RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
@@ -9500,7 +9366,7 @@ class ArrayConcatVisitor {
Handle<Map> map;
if (fast_elements_) {
map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_ELEMENTS);
+ FAST_HOLEY_ELEMENTS);
} else {
map = isolate_->factory()->GetElementsTransitionMap(array,
DICTIONARY_ELEMENTS);
@@ -9559,8 +9425,10 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
switch (array->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
@@ -9572,6 +9440,7 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
break;
@@ -9662,8 +9531,10 @@ static void CollectElementIndices(Handle<JSObject> object,
List<uint32_t>* indices) {
ElementsKind kind = object->GetElementsKind();
switch (kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
uint32_t length = static_cast<uint32_t>(elements->length());
if (range < length) length = range;
@@ -9674,6 +9545,7 @@ static void CollectElementIndices(Handle<JSObject> object,
}
break;
}
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
@@ -9788,8 +9660,10 @@ static bool IterateElements(Isolate* isolate,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
@@ -9810,6 +9684,7 @@ static bool IterateElements(Isolate* isolate,
}
break;
}
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
@@ -9907,7 +9782,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
int argument_count = static_cast<int>(arguments->length()->Number());
- RUNTIME_ASSERT(arguments->HasFastElements());
+ RUNTIME_ASSERT(arguments->HasFastObjectElements());
Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
// Pass 1: estimate the length and number of elements of the result.
@@ -9927,10 +9802,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
// TODO(1810): Find out if it's worthwhile to properly support
// arbitrary ElementsKinds. For now, pessimistically transition to
- // FAST_ELEMENTS.
+ // FAST_*_ELEMENTS.
if (array->HasFastDoubleElements()) {
+ ElementsKind to_kind = FAST_ELEMENTS;
+ if (array->HasFastHoleyElements()) {
+ to_kind = FAST_HOLEY_ELEMENTS;
+ }
array = Handle<JSArray>::cast(
- JSObject::TransitionElementsKind(array, FAST_ELEMENTS));
+ JSObject::TransitionElementsKind(array, to_kind));
}
length_estimate =
static_cast<uint32_t>(array->length()->Number());
@@ -10027,29 +9906,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, from, 0);
CONVERT_ARG_CHECKED(JSArray, to, 1);
+ from->ValidateElements();
+ to->ValidateElements();
FixedArrayBase* new_elements = from->elements();
+ ElementsKind from_kind = from->GetElementsKind();
MaybeObject* maybe_new_map;
- ElementsKind elements_kind;
- if (new_elements->map() == isolate->heap()->fixed_array_map() ||
- new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
- elements_kind = FAST_ELEMENTS;
- } else if (new_elements->map() ==
- isolate->heap()->fixed_double_array_map()) {
- elements_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- elements_kind = DICTIONARY_ELEMENTS;
- }
- maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind);
+ maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind);
Object* new_map;
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
- to->set_map(Map::cast(new_map));
- to->set_elements(new_elements);
+ to->set_map_and_elements(Map::cast(new_map), new_elements);
to->set_length(from->length());
Object* obj;
{ MaybeObject* maybe_obj = from->ResetElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
from->set_length(Smi::FromInt(0));
+ to->ValidateElements();
return to;
}
@@ -10099,8 +9971,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastElements() ||
- array->HasFastSmiOnlyElements() ||
+ ASSERT(array->HasFastSmiOrObjectElements() ||
array->HasFastDoubleElements());
Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
@@ -10119,11 +9990,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(String, name, 1);
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
- return obj->LookupAccessor(name, component);
+ if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
+ return JSObject::cast(receiver)->LookupAccessor(name, component);
}
@@ -10214,12 +10086,10 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
}
}
case INTERCEPTOR:
- case MAP_TRANSITION:
- case ELEMENTS_TRANSITION:
- case CONSTANT_TRANSITION:
- case NULL_DESCRIPTOR:
+ case TRANSITION:
return heap->undefined_value();
case HANDLER:
+ case NONEXISTENT:
UNREACHABLE();
return heap->undefined_value();
}
@@ -10252,7 +10122,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// entered (if the debugger is entered). The reason for switching context here
// is that for some property lookups (accessors and interceptors) callbacks
// into the embedding application can occur, and the embedding application
- // could have the assumption that its own global context is the current
+ // could have the assumption that its own native context is the current
// context and not some internal debugger context.
SaveContext save(isolate);
if (isolate->debug()->InDebugger()) {
@@ -10291,13 +10161,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
for (int i = 0; i < length; i++) {
LookupResult result(isolate);
jsproto->LocalLookup(*name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
// LookupResult is not GC safe as it holds raw object pointers.
// GC can happen later in this code so put the required fields into
// local variables using handles when required for later use.
- PropertyType result_type = result.type();
Handle<Object> result_callback_obj;
- if (result_type == CALLBACKS) {
+ if (result.IsPropertyCallbacks()) {
result_callback_obj = Handle<Object>(result.GetCallbackObject(),
isolate);
}
@@ -10315,7 +10184,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
// If the callback object is a fixed array then it contains JavaScript
// getter and/or setter.
- bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ bool hasJavaScriptAccessors = result.IsPropertyCallbacks() &&
result_callback_obj->IsAccessorPair();
Handle<FixedArray> details =
isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
@@ -10349,7 +10218,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
LookupResult result(isolate);
obj->Lookup(*name, &result);
- if (result.IsProperty()) {
+ if (result.IsFound()) {
return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
}
return isolate->heap()->undefined_value();
@@ -10379,7 +10248,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(details.index());
+ // TODO(verwaest): Depends on the type of details.
+ return Smi::FromInt(details.dictionary_index());
}
@@ -10798,12 +10668,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// value object is not converted into a wrapped JS object. To
// hide this optimization from the debugger, we wrap the receiver
// by creating a correct wrapper object based on the calling frame's
- // global context.
+ // native context.
it.Advance();
- Handle<Context> calling_frames_global_context(
- Context::cast(Context::cast(it.frame()->context())->global_context()));
+ Handle<Context> calling_frames_native_context(
+ Context::cast(Context::cast(it.frame()->context())->native_context()));
receiver =
- isolate->factory()->ToObject(receiver, calling_frames_global_context);
+ isolate->factory()->ToObject(receiver, calling_frames_native_context);
}
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -10895,7 +10765,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
// These will be variables introduced by eval.
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
+ !function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
bool threw = false;
Handle<FixedArray> keys =
@@ -11069,7 +10939,8 @@ class ScopeIterator {
inlined_jsframe_index_(inlined_jsframe_index),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
- nested_scope_chain_(4) {
+ nested_scope_chain_(4),
+ failed_(false) {
// Catch the case when the debugger stops in an internal function.
Handle<SharedFunctionInfo> shared_info(function_->shared());
@@ -11082,7 +10953,7 @@ class ScopeIterator {
}
// Get the debug info (create it if it does not exist).
- if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+ if (!isolate->debug()->EnsureDebugInfo(shared_info, function_)) {
// Return if ensuring debug info failed.
return;
}
@@ -11107,7 +10978,6 @@ class ScopeIterator {
if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
} else {
// Reparse the code and analyze the scopes.
- ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
Handle<Script> script(Script::cast(shared_info->script()));
Scope* scope = NULL;
@@ -11115,36 +10985,25 @@ class ScopeIterator {
Handle<ScopeInfo> scope_info(shared_info->scope_info());
if (scope_info->Type() != FUNCTION_SCOPE) {
// Global or eval code.
- CompilationInfo info(script);
+ CompilationInfoWithZone info(script);
if (scope_info->Type() == GLOBAL_SCOPE) {
info.MarkAsGlobal();
} else {
ASSERT(scope_info->Type() == EVAL_SCOPE);
info.MarkAsEval();
- info.SetCallingContext(Handle<Context>(function_->context()));
+ info.SetContext(Handle<Context>(function_->context()));
}
if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
+ RetrieveScopeChain(scope, shared_info);
} else {
// Function code
- CompilationInfo info(shared_info);
+ CompilationInfoWithZone info(shared_info);
if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
- }
-
- // Retrieve the scope chain for the current position.
- if (scope != NULL) {
- int source_position = shared_info->code()->SourcePosition(frame_->pc());
- scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
- } else {
- // A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- UNREACHABLE();
+ RetrieveScopeChain(scope, shared_info);
}
}
}
@@ -11155,21 +11014,28 @@ class ScopeIterator {
frame_(NULL),
inlined_jsframe_index_(0),
function_(function),
- context_(function->context()) {
+ context_(function->context()),
+ failed_(false) {
if (function->IsBuiltin()) {
context_ = Handle<Context>();
}
}
// More scopes?
- bool Done() { return context_.is_null(); }
+ bool Done() {
+ ASSERT(!failed_);
+ return context_.is_null();
+ }
+
+ bool Failed() { return failed_; }
// Move to the next scope.
void Next() {
+ ASSERT(!failed_);
ScopeType scope_type = Type();
if (scope_type == ScopeTypeGlobal) {
// The global scope is always the last in the chain.
- ASSERT(context_->IsGlobalContext());
+ ASSERT(context_->IsNativeContext());
context_ = Handle<Context>();
return;
}
@@ -11186,6 +11052,7 @@ class ScopeIterator {
// Return the type of the current scope.
ScopeType Type() {
+ ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
switch (scope_info->Type()) {
@@ -11197,7 +11064,7 @@ class ScopeIterator {
ASSERT(context_->IsModuleContext());
return ScopeTypeModule;
case GLOBAL_SCOPE:
- ASSERT(context_->IsGlobalContext());
+ ASSERT(context_->IsNativeContext());
return ScopeTypeGlobal;
case WITH_SCOPE:
ASSERT(context_->IsWithContext());
@@ -11213,8 +11080,8 @@ class ScopeIterator {
UNREACHABLE();
}
}
- if (context_->IsGlobalContext()) {
- ASSERT(context_->global()->IsGlobalObject());
+ if (context_->IsNativeContext()) {
+ ASSERT(context_->global_object()->IsGlobalObject());
return ScopeTypeGlobal;
}
if (context_->IsFunctionContext()) {
@@ -11235,9 +11102,10 @@ class ScopeIterator {
// Return the JavaScript object with the content of the current scope.
Handle<JSObject> ScopeObject() {
+ ASSERT(!failed_);
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global());
+ return Handle<JSObject>(CurrentContext()->global_object());
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
ASSERT(nested_scope_chain_.length() == 1);
@@ -11260,6 +11128,7 @@ class ScopeIterator {
}
Handle<ScopeInfo> CurrentScopeInfo() {
+ ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
return nested_scope_chain_.last();
} else if (context_->IsBlockContext()) {
@@ -11273,6 +11142,7 @@ class ScopeIterator {
// Return the context for this scope. For the local context there might not
// be an actual context.
Handle<Context> CurrentContext() {
+ ASSERT(!failed_);
if (Type() == ScopeTypeGlobal ||
nested_scope_chain_.is_empty()) {
return context_;
@@ -11286,6 +11156,7 @@ class ScopeIterator {
#ifdef DEBUG
// Debug print of the content of the current scope.
void DebugPrint() {
+ ASSERT(!failed_);
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
PrintF("Global:\n");
@@ -11343,6 +11214,24 @@ class ScopeIterator {
Handle<JSFunction> function_;
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
+ bool failed_;
+
+ void RetrieveScopeChain(Scope* scope,
+ Handle<SharedFunctionInfo> shared_info) {
+ if (scope != NULL) {
+ int source_position = shared_info->code()->SourcePosition(frame_->pc());
+ scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+ } else {
+ // A failed reparse indicates that the preparser has diverged from the
+ // parser or that the preparse data given to the initial parse has been
+ // faulty. We fail in debug mode but in release mode we only provide the
+ // information we get from the context chain but nothing about
+ // completely stack allocated scopes or stack allocated locals.
+      // The reparse can also fail due to a stack overflow.
+ ASSERT(isolate_->has_pending_exception());
+ failed_ = true;
+ }
+ }
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
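
// Usage sketch for the failure contract added above: callers must consult
// Failed() before using the iterator, since Done() and Type() now
// ASSERT(!failed_). This mirrors the check added to
// CopyNestedScopeContextChain later in this patch:
// ScopeIterator it(isolate, frame, inlined_jsframe_index);
// if (it.Failed()) return Handle<Context>::null();
// for (; !it.Done(); it.Next()) {
//   // inspect it.Type(), it.ScopeObject(), it.CurrentContext(), ...
// }
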
@@ -11611,110 +11500,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllowBreakPointRelocation) {
}
-// Set a break point in a function
+// Set a break point in a function.
// args[0]: function
// args[1]: number: break source position (within the function source)
// args[2]: number: break point object
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<SharedFunctionInfo> shared(fun->shared());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
+ isolate->debug()->SetBreakPoint(function, break_point_object_arg,
&source_position);
return Smi::FromInt(source_position);
}
-Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position) {
- // Iterate the heap looking for SharedFunctionInfo generated from the
- // script. The inner most SharedFunctionInfo containing the source position
- // for the requested break point is found.
- // NOTE: This might require several heap iterations. If the SharedFunctionInfo
- // which is found is not compiled it is compiled and the heap is iterated
- // again as the compilation might create inner functions from the newly
- // compiled function and the actual requested break point might be in one of
- // these functions.
- bool done = false;
- // The current candidate for the source position:
- int target_start_position = RelocInfo::kNoPosition;
- Handle<SharedFunctionInfo> target;
- while (!done) {
- { // Extra scope for iterator and no-allocation.
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contains only one function
- // declaration, the source for the top-level and the
- // function is the same. In that case prefer the non
- // top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function
- // inside a top-level function can share either start or end
- // position with the top-level function.
- target_start_position = start_position;
- target = shared;
- }
- }
- }
- }
- }
- } // End for loop.
- } // End No allocation scope.
-
- if (target.is_null()) {
- return isolate->heap()->undefined_value();
- }
-
- // If the candidate found is compiled we are done. NOTE: when lazy
- // compilation of inner functions is introduced some additional checking
- // needs to be done here to compile inner functions.
- done = target->is_compiled();
- if (!done) {
- // If the candidate is not compiled compile it to reveal any inner
- // functions which might contain the requested source position.
- SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
- }
- } // End while loop.
-
- return *target;
-}
-
-
// Changes the state of a break point in a script and returns the source
// position where the break point was set. NOTE: Regarding performance see
// the NOTE for GetScriptFromScriptData.
@@ -11733,23 +11538,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
RUNTIME_ASSERT(wrapper->value()->IsScript());
Handle<Script> script(Script::cast(wrapper->value()));
- Object* result = Runtime::FindSharedFunctionInfoInScript(
- isolate, script, source_position);
- if (!result->IsUndefined()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
- // Find position within function. The script position might be before the
- // source position of the first function.
- int position;
- if (shared->start_position() > source_position) {
- position = 0;
- } else {
- position = source_position - shared->start_position();
- }
- isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
- position += shared->start_position();
- return Smi::FromInt(position);
+ // Set break point.
+ if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
+ &source_position)) {
+ return isolate->heap()->undefined_value();
}
- return isolate->heap()->undefined_value();
+
+ return Smi::FromInt(source_position);
}
@@ -11865,6 +11660,8 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
List<Handle<Context> > context_chain;
ScopeIterator it(isolate, frame, inlined_jsframe_index);
+ if (it.Failed()) return Handle<Context>::null();
+
for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
it.Type() != ScopeIterator::ScopeTypeLocal; it.Next()) {
ASSERT(!it.Done());
@@ -11893,9 +11690,7 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
// Materialize the contents of the block scope into a JSObject.
Handle<JSObject> block_scope_object =
MaterializeBlockScope(isolate, current);
- if (block_scope_object.is_null()) {
- return Handle<Context>::null();
- }
+ CHECK(!block_scope_object.is_null());
// Allocate a new function context for the debug evaluation and set the
// extension object.
Handle<Context> new_context =
@@ -12040,7 +11835,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Context> context =
isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
go_between);
- context->set_extension(*local_scope);
+
+ // Use the materialized local scope in a with context.
+ context =
+ isolate->factory()->NewWithContext(go_between, context, local_scope);
+
// Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context;
@@ -12053,6 +11852,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
context,
frame,
inlined_jsframe_index);
+ if (context.is_null()) {
+ ASSERT(isolate->has_pending_exception());
+ MaybeObject* exception = isolate->pending_exception();
+ isolate->clear_pending_exception();
+ return exception;
+ }
if (additional_context->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -12075,7 +11880,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(function_source,
context,
- context->IsGlobalContext(),
+ context->IsNativeContext(),
CLASSIC_MODE,
RelocInfo::kNoPosition,
qml_mode);
@@ -12089,7 +11894,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> evaluation_function =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception, false,
- Handle<Object>(function->context()->qml_global()));
+ Handle<Object>(function->context()->qml_global_object()));
if (has_pending_exception) return Failure::Exception();
Handle<Object> arguments = GetArgumentsObject(isolate,
@@ -12098,6 +11903,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
scope_info,
function_context);
+ // Check if eval is blocked in the context and temporarily allow it
+ // for the debugger.
+ Handle<Context> native_context = Handle<Context>(context->native_context());
+ bool eval_disabled =
+ native_context->allow_code_gen_from_strings()->IsFalse();
+ if (eval_disabled) {
+ native_context->set_allow_code_gen_from_strings(
+ isolate->heap()->true_value());
+ }
// Invoke the evaluation function and return the result.
Handle<Object> argv[] = { arguments, source };
Handle<Object> result =
@@ -12106,6 +11920,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
ARRAY_SIZE(argv),
argv,
&has_pending_exception);
+ if (eval_disabled) {
+ native_context->set_allow_code_gen_from_strings(
+ isolate->heap()->false_value());
+ }
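
These hunks bracket the evaluation with a save/restore of allow_code_gen_from_strings, so the debugger can evaluate expressions even in contexts that forbid code generation from strings. The same shape as a standalone RAII guard (plain C++, not the V8 API):

// Sketch only: temporarily override a flag, restoring it on scope exit.
class ScopedFlagOverride {
 public:
  ScopedFlagOverride(bool* flag, bool value)
      : flag_(flag), saved_(*flag) { *flag_ = value; }
  ~ScopedFlagOverride() { *flag_ = saved_; }
 private:
  bool* flag_;
  bool saved_;
};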
if (has_pending_exception) return Failure::Exception();
// Skip the global proxy as it has no properties and always delegates to the
@@ -12148,9 +11966,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
isolate->set_context(*top->context());
}
- // Get the global context now set to the top context from before the
+ // Get the native context now set to the top context from before the
// debugger was invoked.
- Handle<Context> context = isolate->global_context();
+ Handle<Context> context = isolate->native_context();
bool is_global = true;
@@ -12182,7 +12000,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
// Invoke the result of the compilation to get the evaluation function.
bool has_pending_exception;
- Handle<Object> receiver = isolate->global();
+ Handle<Object> receiver = isolate->global_object();
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
@@ -12316,7 +12134,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
- isolate->context()->global_context()->arguments_boilerplate();
+ isolate->context()->native_context()->arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -12345,7 +12163,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Return result as JS array.
Object* result;
MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
return JSArray::cast(result)->SetContent(instances);
}
@@ -12426,7 +12244,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Return result as JS array.
Object* result;
{ MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->global_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return JSArray::cast(result)->SetContent(instances);
@@ -12451,7 +12269,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
- Handle<String> source(String::cast(args[1]));
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
RUNTIME_ASSERT(script_wrapper->value()->IsScript());
Handle<Script> script(Script::cast(script_wrapper->value()));
@@ -12477,8 +12295,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- Handle<SharedFunctionInfo> shared(func->shared());
- if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
+ if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
func->code()->PrintLn();
@@ -12493,11 +12310,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
ASSERT(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- Handle<SharedFunctionInfo> shared(func->shared());
- if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
+ if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
return Failure::Exception();
}
- shared->construct_stub()->PrintLn();
+ func->shared()->construct_stub()->PrintLn();
#endif // DEBUG
return isolate->heap()->undefined_value();
}
@@ -12542,11 +12358,12 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
// in OpaqueReferences.
RUNTIME_FUNCTION(MaybeObject*,
Runtime_LiveEditFindSharedFunctionInfosForScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
-
+ RUNTIME_ASSERT(script_value->value()->IsScript());
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
const int kBufferSize = 32;
@@ -12588,10 +12405,13 @@ RUNTIME_FUNCTION(MaybeObject*,
// each function with all its descendants is always stored in a contiguous range
// with the function itself going first. The root function is a script function.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+
+ RUNTIME_ASSERT(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
@@ -12607,6 +12427,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
// If old_script_name is provided (i.e. is a String), also creates a copy of
// the script with its original source and sends notification to debugger.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
HandleScope scope(isolate);
CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
@@ -12630,6 +12451,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
@@ -12639,6 +12461,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
// Replaces code of SharedFunctionInfo with a new one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
@@ -12649,6 +12472,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
// Connects SharedFunctionInfo to another script.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
Handle<Object> function_object(args[0], isolate);
@@ -12675,6 +12499,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
// In a code of a parent function replaces original function as embedded object
// with a substitution one.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 3);
HandleScope scope(isolate);
@@ -12695,6 +12520,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
@@ -12709,18 +12535,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
// Returns array of the same length with corresponding results of
// LiveEdit::FunctionPatchabilityStatus type.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
+ return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
+ isolate->runtime_zone());
}
// Compares two strings line-by-line, then token-wise, and returns the diff
// as a JSArray of triplets (pos1, pos1_end, pos2_end) describing the list
// of diff chunks.
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
@@ -12730,9 +12559,50 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
}
+// Restarts a call frame and completely drops all frames above.
+// Returns true if successful. Otherwise returns undefined or an error message.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
+ CHECK(isolate->debugger()->live_edit_enabled());
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ Heap* heap = isolate->heap();
+
+ // Find the relevant frame with the requested index.
+ StackFrame::Id id = isolate->debug()->break_frame_id();
+ if (id == StackFrame::NO_ID) {
+ // If there are no JavaScript stack frames, return undefined.
+ return heap->undefined_value();
+ }
+
+ int count = 0;
+ JavaScriptFrameIterator it(isolate, id);
+ for (; !it.done(); it.Advance()) {
+ if (index < count + it.frame()->GetInlineCount()) break;
+ count += it.frame()->GetInlineCount();
+ }
+ if (it.done()) return heap->undefined_value();
+
+ const char* error_message =
+ LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+ if (error_message) {
+ return *(isolate->factory()->LookupAsciiSymbol(error_message));
+ }
+ return heap->true_value();
+}
+
+
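
The loop above maps a flat frame index onto physical stack frames, each of which may cover several inlined frames; the frame whose inline count pushes the running total past the index is the one to restart. Reduced to a standalone form, with a plain vector standing in for GetInlineCount:

#include <vector>

// Sketch only: find the physical frame containing flat index `index`,
// where physical frame i covers inline_counts[i] logical frames.
int FindPhysicalFrame(const std::vector<int>& inline_counts, int index) {
  int count = 0;
  for (int i = 0; i < static_cast<int>(inline_counts.size()); ++i) {
    if (index < count + inline_counts[i]) return i;
    count += inline_counts[i];
  }
  return -1;  // No such frame; the runtime function returns undefined.
}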
// A testing entry. Returns statement position which is the closest to
// source_position.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
+ CHECK(isolate->debugger()->live_edit_enabled());
ASSERT(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -12779,11 +12649,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, isolate->global(), 0, NULL,
+ result = Execution::Call(function, isolate->global_object(), 0, NULL,
&pending_exception);
} else {
EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global(), 0, NULL,
+ result = Execution::Call(function, isolate->global_object(), 0, NULL,
&pending_exception);
}
}
@@ -13262,7 +13132,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->global_context()->global());
+ Handle<Object> receiver(isolate->native_context()->global_object());
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
@@ -13274,7 +13144,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
if (pending_exception) return Failure::Exception();
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
cache_handle->JSFunctionResultCacheVerify();
}
@@ -13305,7 +13175,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
cache_handle->set(index + 1, *value);
cache_handle->set_finger_index(index);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
cache_handle->JSFunctionResultCacheVerify();
}
@@ -13315,33 +13185,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1);
- return *isolate->factory()->NewJSMessageObject(
- type,
- arguments,
- 0,
- 0,
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->type();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->arguments();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
@@ -13425,9 +13268,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
@@ -13439,6 +13284,8 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
+// Properties test sitting with elements tests - not fooling anyone.
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
@@ -13450,6 +13297,78 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
}
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ return isolate->heap()->ToBoolean(obj->map()->is_observed());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
+ if (obj->map()->is_observed() != is_observed) {
+ MaybeObject* maybe = obj->map()->Copy();
+ Map* map;
+ if (!maybe->To(&map)) return maybe;
+ map->set_is_observed(is_observed);
+ obj->set_map(map);
+ }
+ return isolate->heap()->undefined_value();
+}
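
Note that Runtime_SetIsObserved never flips the bit on the object's current map: maps are shared between objects, so it copies the map, sets the flag on the copy, and installs the copy on this object alone. The pattern in standalone terms, with a stand-in struct rather than the V8 Map:

#include <memory>
#include <utility>

// Sketch only: change a per-object flag by cloning shared metadata.
struct Map { bool is_observed = false; };

void SetIsObserved(std::shared_ptr<Map>* map, bool value) {
  if ((*map)->is_observed == value) return;  // Already in that state.
  auto copy = std::make_shared<Map>(**map);  // Copy; never mutate shared data.
  copy->is_observed = value;
  *map = std::move(copy);                    // Install, as set_map() does.
}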
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
+ ASSERT(args.length() == 0);
+ isolate->set_observer_delivery_pending(true);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
+ ASSERT(args.length() == 0);
+ return isolate->heap()->observation_state();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectHashTable) {
+ ASSERT(args.length() == 0);
+ return ObjectHashTable::Allocate(0);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableGet) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
+ Object* key = args[1];
+ Object* lookup = table->Lookup(key);
+ return lookup->IsTheHole() ? isolate->heap()->undefined_value() : lookup;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableSet) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(ObjectHashTable, table, 0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ return *PutIntoObjectHashTable(table, key, value);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableHas) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
+ Object* key = args[1];
+ Object* lookup = table->Lookup(key);
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+}
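
All three hash-table entries rely on ObjectHashTable::Lookup returning the hole sentinel for a missing key: Get maps the hole to undefined, and Has maps it to a boolean. With a null pointer standing in for the hole, the convention reads:

#include <string>
#include <unordered_map>

// Sketch only: sentinel-based lookup mirroring hole vs. undefined.
typedef std::unordered_map<std::string, std::string> Table;

const std::string* Lookup(const Table& table, const std::string& key) {
  Table::const_iterator it = table.find(key);
  return it == table.end() ? nullptr : &it->second;  // nullptr ~ the hole.
}

bool Has(const Table& table, const std::string& key) {
  return Lookup(table, key) != nullptr;  // ToBoolean(!lookup->IsTheHole()).
}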
+
+
// ----------------------------------------------------------------------------
// Implementation of Runtime
diff --git a/src/3rdparty/v8/src/runtime.h b/src/3rdparty/v8/src/runtime.h
index cfd8b1e..6428f89 100644
--- a/src/3rdparty/v8/src/runtime.h
+++ b/src/3rdparty/v8/src/runtime.h
@@ -62,7 +62,6 @@ namespace internal {
F(GetIndexedInterceptorElementNames, 1, 1) \
F(GetArgumentsProperty, 1, 1) \
F(ToFastProperties, 1, 1) \
- F(ToSlowProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
@@ -86,6 +85,7 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
@@ -120,6 +120,7 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
F(QuoteJSONString, 1, 1) \
F(QuoteJSONStringComma, 1, 1) \
F(QuoteJSONStringArray, 1, 1) \
@@ -267,6 +268,7 @@ namespace internal {
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
+ F(GetDataProperty, 2, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
@@ -284,6 +286,9 @@ namespace internal {
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
+ /* Harmony modules */ \
+ F(IsJSModule, 1, 1) \
+ \
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
F(CreateJSFunctionProxy, 4, 1) \
@@ -299,17 +304,33 @@ namespace internal {
F(SetAdd, 2, 1) \
F(SetHas, 2, 1) \
F(SetDelete, 2, 1) \
+ F(SetGetSize, 1, 1) \
\
/* Harmony maps */ \
F(MapInitialize, 1, 1) \
F(MapGet, 2, 1) \
+ F(MapHas, 2, 1) \
+ F(MapDelete, 2, 1) \
F(MapSet, 3, 1) \
+ F(MapGetSize, 1, 1) \
\
/* Harmony weakmaps */ \
F(WeakMapInitialize, 1, 1) \
F(WeakMapGet, 2, 1) \
+ F(WeakMapHas, 2, 1) \
+ F(WeakMapDelete, 2, 1) \
F(WeakMapSet, 3, 1) \
\
+ /* Harmony observe */ \
+ F(IsObserved, 1, 1) \
+ F(SetIsObserved, 2, 1) \
+ F(SetObserverDeliveryPending, 0, 1) \
+ F(GetObservationState, 0, 1) \
+ F(CreateObjectHashTable, 0, 1) \
+ F(ObjectHashTableGet, 2, 1) \
+ F(ObjectHashTableSet, 3, 1) \
+ F(ObjectHashTableHas, 2, 1) \
+ \
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewObject, 1, 1) \
@@ -318,16 +339,18 @@ namespace internal {
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
F(NewFunctionContext, 1, 1) \
F(PushWithContext, 2, 1) \
F(PushCatchContext, 3, 1) \
F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
+ F(PushModuleContext, 1, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -355,9 +378,6 @@ namespace internal {
F(GetFromCache, 2, 1) \
\
/* Message objects */ \
- F(NewMessageObject, 2, 1) \
- F(MessageGetType, 1, 1) \
- F(MessageGetArguments, 1, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
\
@@ -365,9 +385,11 @@ namespace internal {
F(IS_VAR, 1, 1) \
\
/* expose boolean functions from objects-inl.h */ \
- F(HasFastSmiOnlyElements, 1, 1) \
- F(HasFastElements, 1, 1) \
+ F(HasFastSmiElements, 1, 1) \
+ F(HasFastSmiOrObjectElements, 1, 1) \
+ F(HasFastObjectElements, 1, 1) \
F(HasFastDoubleElements, 1, 1) \
+ F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasExternalPixelElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
@@ -379,6 +401,7 @@ namespace internal {
F(HasExternalUnsignedIntElements, 1, 1) \
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
+ F(HasFastProperties, 1, 1) \
F(TransitionElementsSmiToDouble, 1, 1) \
F(TransitionElementsDoubleToObject, 1, 1) \
F(HaveSameMap, 2, 1) \
@@ -441,6 +464,7 @@ namespace internal {
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckAndDropActivations, 2, 1) \
F(LiveEditCompareStrings, 2, 1) \
+ F(LiveEditRestartFrame, 2, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 2, 1) \
\
@@ -636,13 +660,6 @@ class Runtime : public AllStatic {
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
- static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
- Handle<String> subject,
- Handle<String> search,
- Handle<String> replace,
- bool* found,
- int recursion_limit);
-
// General-purpose helper functions for runtime system.
static int StringMatch(Isolate* isolate,
Handle<String> sub,
@@ -685,11 +702,6 @@ class Runtime : public AllStatic {
Handle<Object> object,
Handle<Object> key);
- // This function is used in FunctionNameUsing* tests.
- static Object* FindSharedFunctionInfoInScript(Isolate* isolate,
- Handle<Script> script,
- int position);
-
// Helper functions used by stubs.
static void PerformGC(Object* result);
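
Each entry in the intrinsic list above is an F(Name, number_of_args, result_size) triple, with an argument count of -1 marking a variadic intrinsic (as in IgnoreAttributesAndSetProperty). Consumers re-expand the list with their own definition of F; a minimal sketch of the X-macro idiom, with hypothetical names:

// Sketch only: the X-macro expansion pattern behind the F(...) lists.
#define SKETCH_RUNTIME_LIST(F) \
  F(GetDataProperty, 2, 1)     \
  F(HaveSameMap, 2, 1)

#define DECLARE_ID(Name, argc, ressize) kRuntime##Name,
enum SketchRuntimeId { SKETCH_RUNTIME_LIST(DECLARE_ID) kNumSketchFunctions };
#undef DECLARE_ID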
diff --git a/src/3rdparty/v8/src/safepoint-table.cc b/src/3rdparty/v8/src/safepoint-table.cc
index 89ad8af..714e5c3 100644
--- a/src/3rdparty/v8/src/safepoint-table.cc
+++ b/src/3rdparty/v8/src/safepoint-table.cc
@@ -116,8 +116,8 @@ void SafepointTable::PrintBits(uint8_t byte, int digits) {
}
-void Safepoint::DefinePointerRegister(Register reg) {
- registers_->Add(reg.code());
+void Safepoint::DefinePointerRegister(Register reg, Zone* zone) {
+ registers_->Add(reg.code(), zone);
}
@@ -131,15 +131,16 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
info.pc = assembler->pc_offset();
info.arguments = arguments;
info.has_doubles = (kind & Safepoint::kWithDoubles);
- deoptimization_info_.Add(info);
- deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex);
+ deoptimization_info_.Add(info, zone_);
+ deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_);
if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deopt_index_list_.length();
}
- indexes_.Add(new ZoneList<int>(8));
+ indexes_.Add(new(zone_) ZoneList<int>(8, zone_), zone_);
registers_.Add((kind & Safepoint::kWithRegisters)
- ? new ZoneList<int>(4)
- : NULL);
+ ? new(zone_) ZoneList<int>(4, zone_)
+ : NULL,
+ zone_);
return Safepoint(indexes_.last(), registers_.last());
}
@@ -190,12 +191,12 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
}
// Emit table of bitmaps.
- ZoneList<uint8_t> bits(bytes_per_entry);
+ ZoneList<uint8_t> bits(bytes_per_entry, zone_);
for (int i = 0; i < length; i++) {
ZoneList<int>* indexes = indexes_[i];
ZoneList<int>* registers = registers_[i];
bits.Clear();
- bits.AddBlock(0, bytes_per_entry);
+ bits.AddBlock(0, bytes_per_entry, zone_);
// Run through the registers (if any).
ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
diff --git a/src/3rdparty/v8/src/safepoint-table.h b/src/3rdparty/v8/src/safepoint-table.h
index 57fceec..307d948 100644
--- a/src/3rdparty/v8/src/safepoint-table.h
+++ b/src/3rdparty/v8/src/safepoint-table.h
@@ -183,8 +183,8 @@ class Safepoint BASE_EMBEDDED {
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
- void DefinePointerSlot(int index) { indexes_->Add(index); }
- void DefinePointerRegister(Register reg);
+ void DefinePointerSlot(int index, Zone* zone) { indexes_->Add(index, zone); }
+ void DefinePointerRegister(Register reg, Zone* zone);
private:
Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
@@ -198,13 +198,14 @@ class Safepoint BASE_EMBEDDED {
class SafepointTableBuilder BASE_EMBEDDED {
public:
- SafepointTableBuilder()
- : deoptimization_info_(32),
- deopt_index_list_(32),
- indexes_(32),
- registers_(32),
+ explicit SafepointTableBuilder(Zone* zone)
+ : deoptimization_info_(32, zone),
+ deopt_index_list_(32, zone),
+ indexes_(32, zone),
+ registers_(32, zone),
emitted_(false),
- last_lazy_safepoint_(0) { }
+ last_lazy_safepoint_(0),
+ zone_(zone) { }
// Get the offset of the emitted safepoint table in the code.
unsigned GetCodeOffset() const;
@@ -242,6 +243,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
bool emitted_;
int last_lazy_safepoint_;
+ Zone* zone_;
+
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
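
The recurring change across these files is that ZoneList operations take an explicit Zone* instead of reaching for shared state, so SafepointTableBuilder captures its zone at construction and forwards it to every Add. A standalone arena-backed list makes the ownership explicit:

#include <cstddef>
#include <deque>
#include <new>
#include <vector>

// Sketch only: a list whose storage comes from a caller-supplied arena,
// mirroring ZoneList<T>::Add(value, zone).
class Arena {
 public:
  void* New(std::size_t bytes) {
    blocks_.push_back(std::vector<char>(bytes));
    return blocks_.back().data();
  }
 private:
  std::deque<std::vector<char> > blocks_;  // deque: stable element storage.
};

template <typename T>
class ArenaList {
 public:
  void Add(const T& value, Arena* arena) {
    T* slot = new (arena->New(sizeof(T))) T(value);  // Placement-new in arena.
    items_.push_back(slot);
  }
  int length() const { return static_cast<int>(items_.size()); }
 private:
  std::vector<T*> items_;
};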
diff --git a/src/3rdparty/v8/src/scanner.cc b/src/3rdparty/v8/src/scanner.cc
index f24af2e..61ee1a4 100755
--- a/src/3rdparty/v8/src/scanner.cc
+++ b/src/3rdparty/v8/src/scanner.cc
@@ -32,6 +32,11 @@
#include "../include/v8stdint.h"
#include "char-predicates-inl.h"
+#undef CONST
+#undef DELETE
+#undef IN
+#undef VOID
+
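
The #undefs protect the scanner from platform headers (the Windows SDK, for instance) that define CONST, DELETE, IN and VOID as object-like macros, which would otherwise corrupt identifiers in the keyword tables below. The failure mode in miniature:

// Sketch only: how a platform macro breaks an ordinary identifier.
#define VOID void        // Simulating the offending platform definition.
// enum Token { VOID };  // Would expand to 'enum Token { void };' -- error.
#undef VOID              // The fix applied above.
enum Token { VOID };     // Compiles once the macro is gone.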
namespace v8 {
namespace internal {
@@ -1077,6 +1082,7 @@ bool Scanner::ScanRegExpFlags() {
if (!ScanLiteralUnicodeEscape()) {
break;
}
+ Advance();
}
}
literal.Complete();
diff --git a/src/3rdparty/v8/src/scopeinfo.cc b/src/3rdparty/v8/src/scopeinfo.cc
index a1f8339..66e2013 100644
--- a/src/3rdparty/v8/src/scopeinfo.cc
+++ b/src/3rdparty/v8/src/scopeinfo.cc
@@ -38,10 +38,10 @@ namespace v8 {
namespace internal {
-Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Collect stack and context locals.
- ZoneList<Variable*> stack_locals(scope->StackLocalCount());
- ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+ ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
const int stack_local_count = stack_locals.length();
const int context_local_count = context_locals.length();
@@ -199,8 +199,8 @@ int ScopeInfo::ContextLength(bool qml_function) {
bool has_context = context_locals > 0 ||
function_name_context_slot ||
Type() == WITH_SCOPE ||
- (Type() == FUNCTION_SCOPE && CallsEval());
-
+ (Type() == FUNCTION_SCOPE && CallsEval()) ||
+ Type() == MODULE_SCOPE;
// TODO: The QML mode should be checked in the has_context expression.
if (has_context || qml_function) {
return Context::MIN_CONTEXT_SLOTS + context_locals +
@@ -230,11 +230,7 @@ bool ScopeInfo::HasHeapAllocatedLocals() {
bool ScopeInfo::HasContext() {
- if (length() > 0) {
- return ContextLength() > 0;
- } else {
- return false;
- }
+ return ContextLength() > 0;
}
diff --git a/src/3rdparty/v8/src/scopes.cc b/src/3rdparty/v8/src/scopes.cc
index 6580378..d2a919a 100644
--- a/src/3rdparty/v8/src/scopes.cc
+++ b/src/3rdparty/v8/src/scopes.cc
@@ -29,6 +29,7 @@
#include "scopes.h"
+#include "accessors.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "messages.h"
@@ -59,7 +60,9 @@ static bool Match(void* key1, void* key2) {
}
-VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
+VariableMap::VariableMap(Zone* zone)
+ : ZoneHashMap(Match, 8, ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
VariableMap::~VariableMap() {}
@@ -71,24 +74,26 @@ Variable* VariableMap::Declare(
Variable::Kind kind,
InitializationFlag initialization_flag,
Interface* interface) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
+ Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true,
+ ZoneAllocationPolicy(zone()));
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
- p->value = new Variable(scope,
- name,
- mode,
- is_valid_lhs,
- kind,
- initialization_flag,
- interface);
+ p->value = new(zone()) Variable(scope,
+ name,
+ mode,
+ is_valid_lhs,
+ kind,
+ initialization_flag,
+ interface);
}
return reinterpret_cast<Variable*>(p->value);
}
Variable* VariableMap::Lookup(Handle<String> name) {
- Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
+ Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false,
+ ZoneAllocationPolicy(NULL));
if (p != NULL) {
ASSERT(*reinterpret_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);
@@ -101,39 +106,40 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Scope* outer_scope, ScopeType type)
+Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4),
+ inner_scopes_(4, zone),
+ variables_(zone),
+ temps_(4, zone),
+ params_(4, zone),
+ unresolved_(16, zone),
+ decls_(4, zone),
interface_(FLAG_harmony_modules &&
(type == MODULE_SCOPE || type == GLOBAL_SCOPE)
- ? Interface::NewModule() : NULL),
- already_resolved_(false) {
+ ? Interface::NewModule(zone) : NULL),
+ already_resolved_(false),
+ zone_(zone) {
SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
- // At some point we might want to provide outer scopes to
- // eval scopes (by walking the stack and reading the scope info).
- // In that case, the ASSERT below needs to be adjusted.
- ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
+ // The outermost scope must be a global scope.
+ ASSERT(type == GLOBAL_SCOPE || outer_scope != NULL);
ASSERT(!HasIllegalRedeclaration());
}
Scope::Scope(Scope* inner_scope,
ScopeType type,
- Handle<ScopeInfo> scope_info)
+ Handle<ScopeInfo> scope_info,
+ Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4),
+ inner_scopes_(4, zone),
+ variables_(zone),
+ temps_(4, zone),
+ params_(4, zone),
+ unresolved_(16, zone),
+ decls_(4, zone),
interface_(NULL),
- already_resolved_(true) {
+ already_resolved_(true),
+ zone_(zone) {
SetDefaults(type, NULL, scope_info);
if (!scope_info.is_null()) {
num_heap_slots_ = scope_info_->ContextLength();
@@ -145,16 +151,17 @@ Scope::Scope(Scope* inner_scope,
}
-Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
+Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
: isolate_(Isolate::Current()),
- inner_scopes_(1),
- variables_(),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0),
+ inner_scopes_(1, zone),
+ variables_(zone),
+ temps_(0, zone),
+ params_(0, zone),
+ unresolved_(0, zone),
+ decls_(0, zone),
interface_(NULL),
- already_resolved_(true) {
+ already_resolved_(true),
+ zone_(zone) {
SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
AddInnerScope(inner_scope);
++num_var_or_const_;
@@ -204,36 +211,53 @@ void Scope::SetDefaults(ScopeType type,
}
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
+ Zone* zone) {
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
- while (!context->IsGlobalContext()) {
+ while (!context->IsNativeContext()) {
if (context->IsWithContext()) {
- Scope* with_scope = new Scope(current_scope,
- WITH_SCOPE,
- Handle<ScopeInfo>::null());
+ Scope* with_scope = new(zone) Scope(current_scope,
+ WITH_SCOPE,
+ Handle<ScopeInfo>::null(),
+ zone);
current_scope = with_scope;
// All the inner scopes are inside a with.
contains_with = true;
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
+ } else if (context->IsGlobalContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ current_scope = new(zone) Scope(current_scope,
+ GLOBAL_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
+ } else if (context->IsModuleContext()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
+ current_scope = new(zone) Scope(current_scope,
+ MODULE_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else if (context->IsFunctionContext()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- current_scope = new Scope(current_scope,
- FUNCTION_SCOPE,
- Handle<ScopeInfo>(scope_info));
+ current_scope = new(zone) Scope(current_scope,
+ FUNCTION_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else if (context->IsBlockContext()) {
ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
- current_scope = new Scope(current_scope,
- BLOCK_SCOPE,
- Handle<ScopeInfo>(scope_info));
+ current_scope = new(zone) Scope(current_scope,
+ BLOCK_SCOPE,
+ Handle<ScopeInfo>(scope_info),
+ zone);
} else {
ASSERT(context->IsCatchContext());
String* name = String::cast(context->extension());
- current_scope = new Scope(current_scope, Handle<String>(name));
+ current_scope = new(zone) Scope(
+ current_scope, Handle<String>(name), zone);
}
if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
@@ -265,7 +289,8 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
- AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate());
+ AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
+ info->zone());
if (!top->AllocateVariables(info, &ast_node_factory)) return false;
}
@@ -309,7 +334,7 @@ void Scope::Initialize() {
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
- outer_scope_->inner_scopes_.Add(this);
+ outer_scope_->inner_scopes_.Add(this, zone());
scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
} else {
scope_inside_with_ = is_with_scope();
@@ -374,7 +399,7 @@ Scope* Scope::FinalizeBlockScope() {
// Move unresolved variables
for (int i = 0; i < unresolved_.length(); i++) {
- outer_scope()->unresolved_.Add(unresolved_[i]);
+ outer_scope()->unresolved_.Add(unresolved_[i], zone());
}
return NULL;
@@ -405,13 +430,8 @@ Variable* Scope::LocalLookup(Handle<String> name) {
init_flag = kCreatedInitialized;
}
- Variable* var =
- variables_.Declare(this,
- name,
- mode,
- true,
- Variable::NORMAL,
- init_flag);
+ Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ init_flag);
var->AllocateTo(location, index);
return var;
}
@@ -426,7 +446,7 @@ Variable* Scope::LookupFunctionVar(Handle<String> name,
VariableMode mode;
int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
if (index < 0) return NULL;
- Variable* var = new Variable(
+ Variable* var = new(zone()) Variable(
this, name, mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
@@ -455,9 +475,9 @@ Variable* Scope::Lookup(Handle<String> name) {
void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
ASSERT(!already_resolved());
ASSERT(is_function_scope());
- Variable* var = variables_.Declare(
- this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
- params_.Add(var);
+ Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ kCreatedInitialized);
+ params_.Add(var, zone());
}
@@ -469,17 +489,14 @@ Variable* Scope::DeclareLocal(Handle<String> name,
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduces during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
- ASSERT(mode == VAR ||
- mode == CONST ||
- mode == CONST_HARMONY ||
- mode == LET);
+ ASSERT(IsDeclaredVariableMode(mode));
++num_var_or_const_;
return variables_.Declare(
this, name, mode, true, Variable::NORMAL, init_flag, interface);
}
-Variable* Scope::DeclareGlobal(Handle<String> name) {
+Variable* Scope::DeclareDynamicGlobal(Handle<String> name) {
ASSERT(is_global_scope());
return variables_.Declare(this,
name,
@@ -504,19 +521,19 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
- Variable* var = new Variable(this,
- name,
- TEMPORARY,
- true,
- Variable::NORMAL,
- kCreatedInitialized);
- temps_.Add(var);
+ Variable* var = new(zone()) Variable(this,
+ name,
+ TEMPORARY,
+ true,
+ Variable::NORMAL,
+ kCreatedInitialized);
+ temps_.Add(var, zone());
return var;
}
void Scope::AddDeclaration(Declaration* declaration) {
- decls_.Add(declaration);
+ decls_.Add(declaration, zone());
}
@@ -582,6 +599,21 @@ VariableProxy* Scope::CheckAssignmentToConst() {
}
+class VarAndOrder {
+ public:
+ VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
+ Variable* var() const { return var_; }
+ int order() const { return order_; }
+ static int Compare(const VarAndOrder* a, const VarAndOrder* b) {
+ return a->order_ - b->order_;
+ }
+
+ private:
+ Variable* var_;
+ int order_;
+};
+
+
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals) {
ASSERT(stack_locals != NULL);
@@ -592,21 +624,29 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
Variable* var = temps_[i];
if (var->is_used()) {
ASSERT(var->IsStackLocal());
- stack_locals->Add(var);
+ stack_locals->Add(var, zone());
}
}
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
// Collect declared local variables.
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var->is_used()) {
- if (var->IsStackLocal()) {
- stack_locals->Add(var);
- } else if (var->IsContextSlot()) {
- context_locals->Add(var);
- }
+ vars.Add(VarAndOrder(var, p->order), zone());
+ }
+ }
+ vars.Sort(VarAndOrder::Compare);
+ int var_count = vars.length();
+ for (int i = 0; i < var_count; i++) {
+ Variable* var = vars[i].var();
+ if (var->IsStackLocal()) {
+ stack_locals->Add(var, zone());
+ } else if (var->IsContextSlot()) {
+ context_locals->Add(var, zone());
}
}
}
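
Iteration order over variables_, a hash map, is not deterministic, so the loop now snapshots the used variables into a VarAndOrder list and sorts by declaration order before partitioning into stack and context locals; AllocateNonParameterLocals below gets the same treatment, making slot assignment stable across runs. The core of the pattern in standalone form:

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

// Sketch only: recover declaration order from an unordered container by
// tagging entries with a sequence number and sorting on it.
struct NameAndOrder {
  std::string name;
  int order;  // Assigned when the entry was first declared.
};

std::vector<NameAndOrder> InDeclarationOrder(
    const std::unordered_map<std::string, int>& vars) {
  std::vector<NameAndOrder> result;
  for (const auto& entry : vars)
    result.push_back(NameAndOrder{entry.first, entry.second});
  std::sort(result.begin(), result.end(),
            [](const NameAndOrder& a, const NameAndOrder& b) {
              return a.order < b.order;  // Cf. VarAndOrder::Compare.
            });
  return result;
}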
@@ -629,12 +669,13 @@ bool Scope::AllocateVariables(CompilationInfo* info,
// 3) Allocate variables.
AllocateVariablesRecursively();
- return true;
-}
-
+ // 4) Allocate and link module instance objects.
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+ AllocateModules(info);
+ LinkModules(info);
+ }
-bool Scope::AllowsLazyCompilation() const {
- return !force_eager_compilation_ && HasTrivialOuterContext();
+ return true;
}
@@ -662,23 +703,36 @@ bool Scope::HasTrivialOuterContext() const {
}
-bool Scope::AllowsLazyRecompilation() const {
- return !force_eager_compilation_ &&
- !TrivialDeclarationScopesBeforeWithScope();
-}
-
-
-bool Scope::TrivialDeclarationScopesBeforeWithScope() const {
+bool Scope::HasLazyCompilableOuterContext() const {
Scope* outer = outer_scope_;
- if (outer == NULL) return false;
+ if (outer == NULL) return true;
+ // There are several reasons that prevent lazy compilation:
+ // - This scope is inside a with scope and all declaration scopes between
+ // them have empty contexts. Such declaration scopes become invisible
+ // during scope info deserialization.
+ // - This scope is inside a strict eval scope with variables that are
+ // potentially context allocated in an artificial function scope that
+ // is not deserialized correctly.
outer = outer->DeclarationScope();
- while (outer != NULL) {
- if (outer->is_with_scope()) return true;
- if (outer->is_declaration_scope() && outer->num_heap_slots() > 0)
- return false;
- outer = outer->outer_scope_;
+ bool found_non_trivial_declarations = false;
+ for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
+ if (scope->is_eval_scope()) return false;
+ if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
+ if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
+ found_non_trivial_declarations = true;
+ }
}
- return false;
+ return true;
+}
+
+
+bool Scope::AllowsLazyCompilation() const {
+ return !force_eager_compilation_ && HasLazyCompilableOuterContext();
+}
+
+
+bool Scope::AllowsLazyCompilationWithoutContext() const {
+ return !force_eager_compilation_ && HasTrivialOuterContext();
}
@@ -703,7 +757,7 @@ Scope* Scope::DeclarationScope() {
Handle<ScopeInfo> Scope::GetScopeInfo() {
if (scope_info_.is_null()) {
- scope_info_ = ScopeInfo::Create(this);
+ scope_info_ = ScopeInfo::Create(this, zone());
}
return scope_info_;
}
@@ -889,7 +943,7 @@ void Scope::Print(int n) {
Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
- if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
+ if (dynamics_ == NULL) dynamics_ = new(zone()) DynamicScopePart(zone());
VariableMap* map = dynamics_->GetMap(mode);
Variable* var = map->Lookup(name);
if (var == NULL) {
@@ -979,6 +1033,24 @@ bool Scope::ResolveVariable(CompilationInfo* info,
switch (binding_kind) {
case BOUND:
// We found a variable binding.
+ if (is_qml_mode()) {
+ Handle<GlobalObject> global = isolate_->global_object();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
+ // Get the context before the debugger was entered.
+ SaveContext *save = isolate_->save_context();
+ while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ save = save->prev();
+
+ global = Handle<GlobalObject>(save->context()->global_object());
+ }
+#endif
+
+ if (!global->HasProperty(*(proxy->name()))) {
+ var->set_is_qml_global(true);
+ }
+ }
break;
case BOUND_EVAL_SHADOWED:
@@ -986,11 +1058,11 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// gave up on it (e.g. by encountering a local with the same in the outer
// scope which was not promoted to a context, this can happen if we use
// debugger to evaluate arbitrary expressions at a break point).
- if (var->is_global()) {
+ if (var->IsGlobalObjectProperty()) {
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global();
+ Handle<GlobalObject> global = isolate_->global_object();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
@@ -999,11 +1071,11 @@ bool Scope::ResolveVariable(CompilationInfo* info,
while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
save = save->prev();
- global = Handle<GlobalObject>(save->context()->global());
+ global = Handle<GlobalObject>(save->context()->global_object());
}
#endif
- if (is_qml_mode() && !global->HasProperty(*(proxy->name()))) {
+ if (!global->HasProperty(*(proxy->name()))) {
var->set_is_qml_global(true);
}
}
@@ -1017,11 +1089,11 @@ bool Scope::ResolveVariable(CompilationInfo* info,
break;
case UNBOUND:
- // No binding has been found. Declare a variable in global scope.
- var = info->global_scope()->DeclareGlobal(proxy->name());
+ // No binding has been found. Declare a variable on the global object.
+ var = info->global_scope()->DeclareDynamicGlobal(proxy->name());
if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global();
+ Handle<GlobalObject> global = isolate_->global_object();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
@@ -1030,7 +1102,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
save = save->prev();
- global = Handle<GlobalObject>(save->context()->global());
+ global = Handle<GlobalObject>(save->context()->global_object());
}
#endif
@@ -1047,7 +1119,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
if (is_qml_mode()) {
- Handle<GlobalObject> global = isolate_->global();
+ Handle<GlobalObject> global = isolate_->global_object();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
@@ -1056,11 +1128,11 @@ bool Scope::ResolveVariable(CompilationInfo* info,
while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
save = save->prev();
- global = Handle<GlobalObject>(save->context()->global());
+ global = Handle<GlobalObject>(save->context()->global_object());
}
#endif
- if (is_qml_mode() && !global->HasProperty(*(proxy->name()))) {
+ if (!global->HasProperty(*(proxy->name()))) {
var->set_is_qml_global(true);
}
}
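
This save-context walk now appears, nearly verbatim, in all four resolution paths (BOUND, BOUND_EVAL_SHADOWED, UNBOUND, UNBOUND_EVAL_SHADOWED): when the debugger is active, step back past its context to recover the global object the program was using, then mark the variable QML-global if the name is absent there. The walk reduces to a simple chain traversal (types are stand-ins; hoisting it into one shared helper would remove the duplication):

// Sketch only: skip saved contexts that belong to the debugger.
struct SavedContext {
  bool is_debug_context;
  SavedContext* prev;
};

SavedContext* FirstNonDebugContext(SavedContext* save) {
  while (save != nullptr && save->is_debug_context) save = save->prev;
  return save;  // May be null if only debugger contexts were saved.
}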
@@ -1082,7 +1154,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
if (FLAG_print_interface_details)
PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
#endif
- proxy->interface()->Unify(var->interface(), &ok);
+ proxy->interface()->Unify(var->interface(), zone(), &ok);
if (!ok) {
#ifdef DEBUG
if (FLAG_print_interfaces) {
@@ -1165,11 +1237,13 @@ bool Scope::MustAllocate(Variable* var) {
inner_scope_calls_eval_ ||
scope_contains_with_ ||
is_catch_scope() ||
- is_block_scope())) {
+ is_block_scope() ||
+ is_module_scope() ||
+ is_global_scope())) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
- return !var->is_global() && var->is_used();
+ return !var->IsGlobalObjectProperty() && var->is_used();
}
@@ -1183,11 +1257,11 @@ bool Scope::MustAllocateInContext(Variable* var) {
// catch-bound variables are always allocated in a context.
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
+ if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
- scope_contains_with_ ||
- var->is_global();
+ scope_contains_with_;
}
@@ -1288,11 +1362,19 @@ void Scope::AllocateNonParameterLocals() {
AllocateNonParameterLocal(temps_[i]);
}
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- AllocateNonParameterLocal(var);
+ vars.Add(VarAndOrder(var, p->order), zone());
+ }
+
+ vars.Sort(VarAndOrder::Compare);
+ int var_count = vars.length();
+ for (int i = 0; i < var_count; i++) {
+ AllocateNonParameterLocal(vars[i].var());
}
// For now, function_ must be allocated at the very end. If it gets
@@ -1353,4 +1435,77 @@ int Scope::ContextLocalCount() const {
(function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
}
+
+void Scope::AllocateModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+
+ if (is_module_scope()) {
+ ASSERT(interface_->IsFrozen());
+ ASSERT(scope_info_.is_null());
+
+ // TODO(rossberg): This has to be the initial compilation of this code.
+ // We currently do not allow recompiling any module definitions.
+ Handle<ScopeInfo> scope_info = GetScopeInfo();
+ Factory* factory = info->isolate()->factory();
+ Handle<Context> context = factory->NewModuleContext(scope_info);
+ Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
+ context->set_module(*instance);
+
+ bool ok;
+ interface_->MakeSingleton(instance, &ok);
+ ASSERT(ok);
+ }
+
+ // Allocate nested modules.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_.at(i);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->AllocateModules(info);
+ }
+ }
+}
+
+
+void Scope::LinkModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+
+ if (is_module_scope()) {
+ Handle<JSModule> instance = interface_->Instance();
+
+ // Populate the module instance object.
+ const PropertyAttributes ro_attr =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+ const PropertyAttributes rw_attr =
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
+ for (Interface::Iterator it = interface_->iterator();
+ !it.done(); it.Advance()) {
+ if (it.interface()->IsModule()) {
+ Handle<Object> value = it.interface()->Instance();
+ ASSERT(!value.is_null());
+ JSReceiver::SetProperty(
+ instance, it.name(), value, ro_attr, kStrictMode);
+ } else {
+ Variable* var = LocalLookup(it.name());
+ ASSERT(var != NULL && var->IsContextSlot());
+ PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
+ Handle<AccessorInfo> info =
+ Accessors::MakeModuleExport(it.name(), var->index(), attr);
+ Handle<Object> result = SetAccessor(instance, info);
+ ASSERT(!(result.is_null() || result->IsUndefined()));
+ USE(result);
+ }
+ }
+ USE(JSObject::PreventExtensions(instance));
+ }
+
+ // Link nested modules.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_.at(i);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->LinkModules(info);
+ }
+ }
+}
+
+
} } // namespace v8::internal
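
AllocateModules and LinkModules are deliberately separate passes, as the comment added to scopes.h notes: every instance object must exist before any is populated, because module interfaces may reference one another cyclically. The two-phase shape in standalone form:

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Sketch only: two-phase creation for potentially cyclic references.
struct Module {
  std::vector<Module*> imports;  // Wired up only in the link phase.
};

void AllocateAndLink(
    const std::vector<std::pair<std::string, std::string> >& edges,
    std::unordered_map<std::string, Module>* registry) {
  for (const auto& e : edges) {  // Phase 1: allocate every instance.
    (*registry)[e.first];
    (*registry)[e.second];
  }
  for (const auto& e : edges) {  // Phase 2: link; every target now exists.
    (*registry)[e.first].imports.push_back(&(*registry)[e.second]);
  }
}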
diff --git a/src/3rdparty/v8/src/scopes.h b/src/3rdparty/v8/src/scopes.h
index 31847e2..e9425f0 100644
--- a/src/3rdparty/v8/src/scopes.h
+++ b/src/3rdparty/v8/src/scopes.h
@@ -40,7 +40,7 @@ class CompilationInfo;
// A hash map to support fast variable declaration and lookup.
class VariableMap: public ZoneHashMap {
public:
- VariableMap();
+ explicit VariableMap(Zone* zone);
virtual ~VariableMap();
@@ -53,6 +53,11 @@ class VariableMap: public ZoneHashMap {
Interface* interface = Interface::NewValue());
Variable* Lookup(Handle<String> name);
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Zone* zone_;
};
@@ -62,14 +67,19 @@ class VariableMap: public ZoneHashMap {
// and setup time for scopes that don't need them.
class DynamicScopePart : public ZoneObject {
public:
+ explicit DynamicScopePart(Zone* zone) {
+ for (int i = 0; i < 3; i++)
+ maps_[i] = new(zone->New(sizeof(VariableMap))) VariableMap(zone);
+ }
+
VariableMap* GetMap(VariableMode mode) {
int index = mode - DYNAMIC;
ASSERT(index >= 0 && index < 3);
- return &maps_[index];
+ return maps_[index];
}
private:
- VariableMap maps_[3];
+ VariableMap *maps_[3];
};
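
VariableMap now needs a Zone at construction, so DynamicScopePart can no longer hold its three maps by value; the constructor placement-news each one into zone memory instead. The idiom, standalone:

#include <new>

// Sketch only: placement-new into caller-owned storage, as the
// DynamicScopePart constructor does with zone->New(sizeof(VariableMap)).
struct Counter {
  explicit Counter(int start) : n(start) {}
  int n;
};

int main() {
  alignas(Counter) char storage[sizeof(Counter)];  // Stand-in for zone bytes.
  Counter* c = new (storage) Counter(7);  // Construct in place; no heap call.
  return c->n;  // No delete: the storage's lifetime governs the object.
}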
@@ -87,14 +97,15 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Construction
- Scope(Scope* outer_scope, ScopeType type);
+ Scope(Scope* outer_scope, ScopeType type, Zone* zone);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static bool Analyze(CompilationInfo* info);
- static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
+ static Scope* DeserializeScopeChain(Context* context, Scope* global_scope,
+ Zone* zone);
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
@@ -106,6 +117,8 @@ class Scope: public ZoneObject {
// tree and its children are reparented.
Scope* FinalizeBlockScope();
+ Zone* zone() const { return zone_; }
+
// ---------------------------------------------------------------------------
// Declarations
@@ -147,21 +160,21 @@ class Scope: public ZoneObject {
// global scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
// with statements or eval calls.
- Variable* DeclareGlobal(Handle<String> name);
+ Variable* DeclareDynamicGlobal(Handle<String> name);
// Create a new unresolved variable.
template<class Visitor>
VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
Handle<String> name,
- int position = RelocInfo::kNoPosition,
- Interface* interface = Interface::NewValue()) {
+ Interface* interface = Interface::NewValue(),
+ int position = RelocInfo::kNoPosition) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
ASSERT(!already_resolved());
VariableProxy* proxy =
- factory->NewVariableProxy(name, false, position, interface);
- unresolved_.Add(proxy);
+ factory->NewVariableProxy(name, false, interface, position);
+ unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -176,7 +189,7 @@ class Scope: public ZoneObject {
// Creates a new temporary variable in this scope. The name is only used
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
- // around.
+ // around. The name should not clash with legitimate variable names.
Variable* NewTemporary(Handle<String> name);
// Adds the specific declaration node to the list of declarations in
@@ -272,7 +285,8 @@ class Scope: public ZoneObject {
bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return type_ == WITH_SCOPE; }
bool is_declaration_scope() const {
- return is_eval_scope() || is_function_scope() || is_global_scope();
+ return is_eval_scope() || is_function_scope() ||
+ is_module_scope() || is_global_scope();
}
bool is_classic_mode() const {
return language_mode() == CLASSIC_MODE;
@@ -370,16 +384,14 @@ class Scope: public ZoneObject {
// Determine if we can use lazy compilation for this scope.
bool AllowsLazyCompilation() const;
- // True if we can lazily recompile functions with this scope.
- bool AllowsLazyRecompilation() const;
+ // Determine if we can use lazy compilation for this scope without a context.
+ bool AllowsLazyCompilationWithoutContext() const;
- // True if the outer context of this scope is always the global context.
+ // True if the outer context of this scope is always the native context.
bool HasTrivialOuterContext() const;
- // True if this scope is inside a with scope and all declaration scopes
- // between them have empty contexts. Such declaration scopes become
- // invisible during scope info deserialization.
- bool TrivialDeclarationScopesBeforeWithScope() const;
+ // True if the outer context allows lazy compilation of this scope.
+ bool HasLazyCompilableOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
@@ -590,16 +602,24 @@ class Scope: public ZoneObject {
bool AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory);
+ // Instance objects have to be created ahead of time (before code generation)
+ // because of potentially cyclic references between them.
+ // Linking also has to be a separate stage, since populating one object may
+ // potentially require (forward) references to others.
+ void AllocateModules(CompilationInfo* info);
+ void LinkModules(CompilationInfo* info);
+
private:
// Construct a scope based on the scope info.
- Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
+ Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
+ Zone* zone);
// Construct a catch scope with a binding for the name.
- Scope(Scope* inner_scope, Handle<String> catch_variable_name);
+ Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone);
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
- inner_scopes_.Add(inner_scope);
+ inner_scopes_.Add(inner_scope, zone_);
inner_scope->outer_scope_ = this;
}
}
@@ -607,6 +627,8 @@ class Scope: public ZoneObject {
void SetDefaults(ScopeType type,
Scope* outer_scope,
Handle<ScopeInfo> scope_info);
+
+ Zone* zone_;
};
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/serialize.cc b/src/3rdparty/v8/src/serialize.cc
index cf8e5e1..dfc5574 100644
--- a/src/3rdparty/v8/src/serialize.cc
+++ b/src/3rdparty/v8/src/serialize.cc
@@ -37,6 +37,7 @@
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
+#include "snapshot.h"
#include "stub-cache.h"
#include "v8threads.h"
@@ -510,6 +511,22 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
47,
"date_cache_stamp");
+ Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+ UNCLASSIFIED,
+ 48,
+ "address_of_pending_message_obj");
+ Add(ExternalReference::address_of_has_pending_message(isolate).address(),
+ UNCLASSIFIED,
+ 49,
+ "address_of_has_pending_message");
+ Add(ExternalReference::address_of_pending_message_script(isolate).address(),
+ UNCLASSIFIED,
+ 50,
+ "pending_message_script");
+ Add(ExternalReference::get_make_code_young_function(isolate).address(),
+ UNCLASSIFIED,
+ 51,
+ "Code::MakeCodeYoung");
}
@@ -586,104 +603,27 @@ Deserializer::Deserializer(SnapshotByteSource* source)
: isolate_(NULL),
source_(source),
external_reference_decoder_(NULL) {
-}
-
-
-// This routine both allocates a new object, and also keeps
-// track of where objects have been allocated so that we can
-// fix back references when deserializing.
-Address Deserializer::Allocate(int space_index, Space* space, int size) {
- Address address;
- if (!SpaceIsLarge(space_index)) {
- ASSERT(!SpaceIsPaged(space_index) ||
- size <= Page::kPageSize - Page::kObjectStartOffset);
- MaybeObject* maybe_new_allocation;
- if (space_index == NEW_SPACE) {
- maybe_new_allocation =
- reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
- } else {
- maybe_new_allocation =
- reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
- }
- ASSERT(!maybe_new_allocation->IsFailure());
- Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
- HeapObject* new_object = HeapObject::cast(new_allocation);
- address = new_object->address();
- high_water_[space_index] = address + size;
- } else {
- ASSERT(SpaceIsLarge(space_index));
- LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
- Object* new_allocation;
- if (space_index == kLargeData || space_index == kLargeFixedArray) {
- new_allocation =
- lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
- } else {
- ASSERT_EQ(kLargeCode, space_index);
- new_allocation =
- lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
- }
- HeapObject* new_object = HeapObject::cast(new_allocation);
- // Record all large objects in the same space.
- address = new_object->address();
- pages_[LO_SPACE].Add(address);
+ for (int i = 0; i < LAST_SPACE + 1; i++) {
+ reservations_[i] = kUninitializedReservation;
}
- last_object_address_ = address;
- return address;
-}
-
-
-// This returns the address of an object that has been described in the
-// snapshot as being offset bytes back in a particular space.
-HeapObject* Deserializer::GetAddressFromEnd(int space) {
- int offset = source_->GetInt();
- ASSERT(!SpaceIsLarge(space));
- offset <<= kObjectAlignmentBits;
- return HeapObject::FromAddress(high_water_[space] - offset);
-}
-
-
-// This returns the address of an object that has been described in the
-// snapshot as being offset bytes into a particular space.
-HeapObject* Deserializer::GetAddressFromStart(int space) {
- int offset = source_->GetInt();
- if (SpaceIsLarge(space)) {
- // Large spaces have one object per 'page'.
- return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
- }
- offset <<= kObjectAlignmentBits;
- if (space == NEW_SPACE) {
- // New space has only one space - numbered 0.
- return HeapObject::FromAddress(pages_[space][0] + offset);
- }
- ASSERT(SpaceIsPaged(space));
- int page_of_pointee = offset >> kPageSizeBits;
- Address object_address = pages_[space][page_of_pointee] +
- (offset & Page::kPageAlignmentMask);
- return HeapObject::FromAddress(object_address);
}
void Deserializer::Deserialize() {
isolate_ = Isolate::Current();
ASSERT(isolate_ != NULL);
- // Don't GC while deserializing - just expand the heap.
- AlwaysAllocateScope always_allocate;
- // Don't use the free lists while deserializing.
- LinearAllocationScope allocate_linearly;
+ isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
// No active threads.
ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
- // Make sure the entire partial snapshot cache is traversed, filling it with
- // valid object pointers.
- isolate_->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
- isolate_->heap()->set_global_contexts_list(
+ isolate_->heap()->set_native_contexts_list(
isolate_->heap()->undefined_value());
// Update data pointers to the external strings containing natives sources.
@@ -693,19 +633,33 @@ void Deserializer::Deserialize() {
ExternalAsciiString::cast(source)->update_data_cache();
}
}
+
+ // Issue code events for newly deserialized code objects.
+ LOG_CODE_EVENT(isolate_, LogCodeObjects());
+ LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}
void Deserializer::DeserializePartial(Object** root) {
isolate_ = Isolate::Current();
- // Don't GC while deserializing - just expand the heap.
- AlwaysAllocateScope always_allocate;
- // Don't use the free lists while deserializing.
- LinearAllocationScope allocate_linearly;
+ for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
+ ASSERT(reservations_[i] != kUninitializedReservation);
+ }
+ isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
if (external_reference_decoder_ == NULL) {
external_reference_decoder_ = new ExternalReferenceDecoder();
}
+
+ // Record the current top of the code space so we can check below that
+ // no new code objects were deserialized.
+ OldSpace* code_space = isolate_->heap()->code_space();
+ Address start_address = code_space->top();
VisitPointer(root);
+
+ // No code is deserialized here. If this check ever fires, that has
+ // changed, and logging should be added to notify the profiler et al.
+ // of the new code.
+ CHECK_EQ(start_address, code_space->top());
}
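The reservation assertions above are one half of a protocol that replaces the old GC-avoidance scopes: whoever writes the snapshot records how many bytes each space needs, and the reader replays those sizes before deserializing, so ReserveSpace() grows the heap once and the bump-pointer Allocate() never fails mid-stream. A hedged usage sketch (variable names assumed; the real caller lives in the snapshot loading code):

    SnapshotByteSource source(snapshot_data, snapshot_length);
    Deserializer deserializer(&source);
    // Sizes recorded when the snapshot was written; one per space.
    deserializer.set_reservation(NEW_SPACE, new_space_used);
    deserializer.set_reservation(OLD_POINTER_SPACE, pointer_space_used);
    deserializer.set_reservation(OLD_DATA_SPACE, data_space_used);
    deserializer.set_reservation(CODE_SPACE, code_space_used);
    deserializer.set_reservation(MAP_SPACE, map_space_used);
    deserializer.set_reservation(CELL_SPACE, cell_space_used);
    deserializer.DeserializePartial(&root);  // asserts every reservation is set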
@@ -733,10 +687,9 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number,
- Space* space,
Object** write_back) {
int size = source_->GetInt() << kObjectAlignmentBits;
- Address address = Allocate(space_number, space, size);
+ Address address = Allocate(space_number, size);
*write_back = HeapObject::FromAddress(address);
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
@@ -745,44 +698,19 @@ void Deserializer::ReadObject(int space_number,
}
ReadChunk(current, limit, space_number, address);
#ifdef DEBUG
- bool is_codespace = (space == HEAP->code_space()) ||
- ((space == HEAP->lo_space()) && (space_number == kLargeCode));
+ bool is_codespace = (space_number == CODE_SPACE);
ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
#endif
}
-
-// This macro is always used with a constant argument so it should all fold
-// away to almost nothing in the generated code. It might be nicer to do this
-// with the ternary operator but there are type issues with that.
-#define ASSIGN_DEST_SPACE(space_number) \
- Space* dest_space; \
- if (space_number == NEW_SPACE) { \
- dest_space = isolate->heap()->new_space(); \
- } else if (space_number == OLD_POINTER_SPACE) { \
- dest_space = isolate->heap()->old_pointer_space(); \
- } else if (space_number == OLD_DATA_SPACE) { \
- dest_space = isolate->heap()->old_data_space(); \
- } else if (space_number == CODE_SPACE) { \
- dest_space = isolate->heap()->code_space(); \
- } else if (space_number == MAP_SPACE) { \
- dest_space = isolate->heap()->map_space(); \
- } else if (space_number == CELL_SPACE) { \
- dest_space = isolate->heap()->cell_space(); \
- } else { \
- ASSERT(space_number >= LO_SPACE); \
- dest_space = isolate->heap()->lo_space(); \
- }
-
-
-static const int kUnknownOffsetFromStart = -1;
-
-
void Deserializer::ReadChunk(Object** current,
Object** limit,
int source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
+ // Write barrier support costs around 1% of startup time. In fact there
+ // are no new space objects in current boot snapshots, so it's not needed,
+ // but that may change.
bool write_barrier_needed = (current_object_address != NULL &&
source_space != NEW_SPACE &&
source_space != CELL_SPACE &&
@@ -798,21 +726,19 @@ void Deserializer::ReadChunk(Object** current,
ASSERT((within & ~kWhereToPointMask) == 0); \
ASSERT((space_number & ~kSpaceMask) == 0);
-#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \
+#define CASE_BODY(where, how, within, space_number_if_any) \
{ \
bool emit_write_barrier = false; \
bool current_was_incremented = false; \
int space_number = space_number_if_any == kAnyOldSpace ? \
(data & kSpaceMask) : space_number_if_any; \
if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
- ASSIGN_DEST_SPACE(space_number) \
- ReadObject(space_number, dest_space, current); \
+ ReadObject(space_number, current); \
emit_write_barrier = (space_number == NEW_SPACE); \
} else { \
Object* new_object = NULL; /* May not be a real Object pointer. */ \
if (where == kNewObject) { \
- ASSIGN_DEST_SPACE(space_number) \
- ReadObject(space_number, dest_space, &new_object); \
+ ReadObject(space_number, &new_object); \
} else if (where == kRootArray) { \
int root_id = source_->GetInt(); \
new_object = isolate->heap()->roots_array_start()[root_id]; \
@@ -823,6 +749,9 @@ void Deserializer::ReadChunk(Object** current,
[cache_index]; \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kExternalReference) { \
+ int skip = source_->GetInt(); \
+ current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \
+ current) + skip); \
int reference_id = source_->GetInt(); \
Address address = external_reference_decoder_-> \
Decode(reference_id); \
@@ -831,21 +760,26 @@ void Deserializer::ReadChunk(Object** current,
emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetAddressFromEnd(data & kSpaceMask); \
} else { \
- ASSERT(where == kFromStart); \
- if (offset_from_start == kUnknownOffsetFromStart) { \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetAddressFromStart(data & kSpaceMask); \
+ ASSERT(where == kBackrefWithSkip); \
+ int skip = source_->GetInt(); \
+ current = reinterpret_cast<Object**>( \
+ reinterpret_cast<Address>(current) + skip); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ new_object = GetAddressFromEnd(data & kSpaceMask); \
+ } \
+ if (within == kInnerPointer) { \
+ if (space_number != CODE_SPACE || new_object->IsCode()) { \
+ Code* new_code_object = reinterpret_cast<Code*>(new_object); \
+ new_object = reinterpret_cast<Object*>( \
+ new_code_object->instruction_start()); \
} else { \
- Address object_address = pages_[space_number][0] + \
- (offset_from_start << kObjectAlignmentBits); \
- new_object = HeapObject::FromAddress(object_address); \
+ ASSERT(space_number == CODE_SPACE); \
+ JSGlobalPropertyCell* cell = \
+ JSGlobalPropertyCell::cast(new_object); \
+ new_object = reinterpret_cast<Object*>( \
+ cell->ValueAddress()); \
} \
} \
- if (within == kFirstInstruction) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
- new_object = reinterpret_cast<Object*>( \
- new_code_object->instruction_start()); \
- } \
if (how == kFromCode) { \
Address location_of_branch_data = \
reinterpret_cast<Address>(current); \
@@ -871,47 +805,18 @@ void Deserializer::ReadChunk(Object** current,
break; \
} \
-// This generates a case and a body for each space. The large object spaces are
-// very rare in snapshots so they are grouped in one body.
-#define ONE_PER_SPACE(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, kLargeData) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_STATEMENT(where, how, within, kLargeFixedArray) \
- CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
-
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 5 fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \
+ CASE_BODY(where, how, within, NEW_SPACE) \
CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
CASE_STATEMENT(where, how, within, CELL_SPACE) \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_STATEMENT(where, how, within, kLargeData) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_STATEMENT(where, how, within, kLargeFixedArray) \
- CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
-
-#define ONE_PER_CODE_SPACE(where, how, within) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
- CASE_STATEMENT(where, how, within, kLargeCode) \
- CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
+ CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
case byte_code: \
@@ -925,14 +830,48 @@ void Deserializer::ReadChunk(Object** current,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
+#define COMMON_RAW_LENGTHS(f) \
+ f(1) \
+ f(2) \
+ f(3) \
+ f(4) \
+ f(5) \
+ f(6) \
+ f(7) \
+ f(8) \
+ f(9) \
+ f(10) \
+ f(11) \
+ f(12) \
+ f(13) \
+ f(14) \
+ f(15) \
+ f(16) \
+ f(17) \
+ f(18) \
+ f(19) \
+ f(20) \
+ f(21) \
+ f(22) \
+ f(23) \
+ f(24) \
+ f(25) \
+ f(26) \
+ f(27) \
+ f(28) \
+ f(29) \
+ f(30) \
+ f(31)
+
// We generate 31 cases and bodies that process special tags that combine
// the raw data tag and the length into one byte.
-#define RAW_CASE(index, size) \
- case kRawData + index: { \
- byte* raw_data_out = reinterpret_cast<byte*>(current); \
- source_->CopyRaw(raw_data_out, size); \
- current = reinterpret_cast<Object**>(raw_data_out + size); \
- break; \
+#define RAW_CASE(index) \
+ case kRawData + index: { \
+ byte* raw_data_out = reinterpret_cast<byte*>(current); \
+ source_->CopyRaw(raw_data_out, index * kPointerSize); \
+ current = \
+ reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
+ break; \
}
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
@@ -943,12 +882,11 @@ void Deserializer::ReadChunk(Object** current,
int size = source_->GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
source_->CopyRaw(raw_data_out, size);
- current = reinterpret_cast<Object**>(raw_data_out + size);
break;
}
- SIXTEEN_CASES(kRootArrayLowConstants)
- SIXTEEN_CASES(kRootArrayHighConstants) {
+ SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
+ SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
int root_id = RootArrayConstantFromByteCode(data);
Object* object = isolate->heap()->roots_array_start()[root_id];
ASSERT(!isolate->heap()->InNewSpace(object));
@@ -956,6 +894,18 @@ void Deserializer::ReadChunk(Object** current,
break;
}
+ SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
+ SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
+ int root_id = RootArrayConstantFromByteCode(data);
+ int skip = source_->GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + skip);
+ Object* object = isolate->heap()->roots_array_start()[root_id];
+ ASSERT(!isolate->heap()->InNewSpace(object));
+ *current++ = object;
+ break;
+ }
+
case kRepeat: {
int repeats = source_->GetInt();
Object* object = current[-1];
@@ -967,10 +917,11 @@ void Deserializer::ReadChunk(Object** current,
STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
Heap::kOldSpaceRoots);
- STATIC_ASSERT(kMaxRepeats == 12);
- FOUR_CASES(kConstantRepeat)
- FOUR_CASES(kConstantRepeat + 4)
- FOUR_CASES(kConstantRepeat + 8) {
+ STATIC_ASSERT(kMaxRepeats == 13);
+ case kConstantRepeat:
+ FOUR_CASES(kConstantRepeat + 1)
+ FOUR_CASES(kConstantRepeat + 5)
+ FOUR_CASES(kConstantRepeat + 9) {
int repeats = RepeatsForCode(data);
Object* object = current[-1];
ASSERT(!isolate->heap()->InNewSpace(object));
@@ -981,98 +932,80 @@ void Deserializer::ReadChunk(Object** current,
// Deserialize a new object and write a pointer to it to the current
// object.
- ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
- // Support for direct instruction pointers in functions
- ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
+ ALL_SPACES(kNewObject, kPlain, kStartOfObject)
+ // Support for direct instruction pointers in functions. It's an inner
+ // pointer because it points at the entry point, not at the start of the
+ // code object.
+ CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+ CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
- ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
+ ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
+ ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_TARGET_ARCH_MIPS
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS, and
// omitted on the other architectures because it is fully unrolled and
// would cause bloat.
- ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
+ ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
// object. Required only for MIPS.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
- // Find an already deserialized code object using its offset from
- // the start and write a pointer to it to the current object.
- // Required only for MIPS.
- ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
+ ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
// to the current code object or the instruction pointer in a function
// object.
- ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
- ALL_SPACES(kBackref, kPlain, kFirstInstruction)
- // Find an already deserialized object using its offset from the start
- // and write a pointer to it to the current object.
- ALL_SPACES(kFromStart, kPlain, kStartOfObject)
- ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
- // Find an already deserialized code object using its offset from the
- // start and write a pointer to its first instruction to the current code
- // object.
- ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
+ ALL_SPACES(kBackref, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
+ ALL_SPACES(kBackref, kPlain, kInnerPointer)
+ ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
// Find an object in the roots array and write a pointer to it to the
// current object.
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
- CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
+ CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
// Find an object in the partial snapshot cache and write a pointer to it
// to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
CASE_BODY(kPartialSnapshotCache,
kPlain,
kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
+ 0)
// Find a code entry in the partial snapshot cache and
// write a pointer to it to the current object.
- CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
+ CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
CASE_BODY(kPartialSnapshotCache,
kPlain,
- kFirstInstruction,
- 0,
- kUnknownOffsetFromStart)
+ kInnerPointer,
+ 0)
// Find an external reference and write a pointer to it to the current
// object.
CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
CASE_BODY(kExternalReference,
kPlain,
kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
+ 0)
// Find an external reference and write a pointer to it in the current
// code object.
CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
CASE_BODY(kExternalReference,
kFromCode,
kStartOfObject,
- 0,
- kUnknownOffsetFromStart)
+ 0)
#undef CASE_STATEMENT
#undef CASE_BODY
-#undef ONE_PER_SPACE
#undef ALL_SPACES
-#undef ASSIGN_DEST_SPACE
-
- case kNewPage: {
- int space = source_->Get();
- pages_[space].Add(last_object_address_);
- if (space == CODE_SPACE) {
- CPU::FlushICache(last_object_address_, Page::kPageSize);
- }
- break;
- }
case kSkip: {
- current++;
+ int size = source_->GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + size);
break;
}
@@ -1097,18 +1030,20 @@ void Deserializer::ReadChunk(Object** current,
UNREACHABLE();
}
}
- ASSERT_EQ(current, limit);
+ ASSERT_EQ(limit, current);
}
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
- const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
- for (int shift = max_shift; shift > 0; shift -= 7) {
- if (integer >= static_cast<uintptr_t>(1u) << shift) {
- Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart");
- }
- }
- PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
+ ASSERT(integer < 1 << 22);
+ integer <<= 2;
+ int bytes = 1;
+ if (integer > 0xff) bytes = 2;
+ if (integer > 0xffff) bytes = 3;
+ integer |= bytes;
+ Put(static_cast<int>(integer & 0xff), "IntPart1");
+ if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
+ if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
}
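The rewritten PutInt trades the old 7-bits-per-byte varint for a format the decoder can read without branching: the value is shifted left two bits and the freed low bits store the total byte count (1-3), so anything below 2^22 fits. A standalone re-implementation for illustration (assumed sketch, not the V8 class itself):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    void PutInt(std::vector<uint8_t>* out, uint32_t value) {
      assert(value < (1u << 22));   // 3 bytes minus the 2 tag bits
      value <<= 2;                  // make room for the length tag
      int bytes = 1;
      if (value > 0xff) bytes = 2;
      if (value > 0xffff) bytes = 3;
      value |= bytes;               // low 2 bits carry the byte count
      for (int i = 0; i < bytes; i++) {
        out->push_back(static_cast<uint8_t>(value >> (8 * i)));
      }
    }
    // Example: 1 encodes as the single byte 0x05, i.e. (1 << 2) | 1.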
@@ -1116,7 +1051,6 @@ Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder),
- large_object_total_(0),
root_index_wave_front_(0) {
isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
@@ -1149,22 +1083,7 @@ void StartupSerializer::SerializeStrongReferences() {
void PartialSerializer::Serialize(Object** object) {
this->VisitPointer(object);
- Isolate* isolate = Isolate::Current();
-
- // After we have done the partial serialization the partial snapshot cache
- // will contain some references needed to decode the partial snapshot. We
- // fill it up with undefineds so it has a predictable length so the
- // deserialization code doesn't need to know the length.
- for (int index = isolate->serialize_partial_snapshot_cache_length();
- index < Isolate::kPartialSnapshotCacheCapacity;
- index++) {
- isolate->serialize_partial_snapshot_cache()[index] =
- isolate->heap()->undefined_value();
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[index]);
- }
- isolate->set_serialize_partial_snapshot_cache_length(
- Isolate::kPartialSnapshotCacheCapacity);
+ Pad();
}
@@ -1179,14 +1098,14 @@ void Serializer::VisitPointers(Object** start, Object** end) {
if (reinterpret_cast<Address>(current) ==
isolate->heap()->store_buffer()->TopAddress()) {
sink_->Put(kSkip, "Skip");
+ sink_->PutInt(kPointerSize, "SkipOneWord");
} else if ((*current)->IsSmi()) {
- sink_->Put(kRawData, "RawData");
- sink_->PutInt(kPointerSize, "length");
+ sink_->Put(kRawData + 1, "Smi");
for (int i = 0; i < kPointerSize; i++) {
sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
}
} else {
- SerializeObject(*current, kPlain, kStartOfObject);
+ SerializeObject(*current, kPlain, kStartOfObject, 0);
}
}
}
@@ -1194,26 +1113,29 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
-// snapshot the partial snapshot is empty, so nothing happens. When the partial
-// (context) snapshot is created, this array is populated with the pointers that
-// the partial snapshot will need. As that happens we emit serialized objects to
-// the startup snapshot that correspond to the elements of this cache array. On
-// deserialization we therefore need to visit the cache array. This fills it up
-// with pointers to deserialized objects.
+// snapshot nothing happens. When the partial (context) snapshot is created,
+// this array is populated with the pointers that the partial snapshot will
+// need. As that happens we emit serialized objects to the startup snapshot
+// that correspond to the elements of this cache array. On deserialization we
+// therefore need to visit the cache array. This fills it up with pointers to
+// deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+ if (Serializer::enabled()) return;
Isolate* isolate = Isolate::Current();
- visitor->VisitPointers(
- isolate->serialize_partial_snapshot_cache(),
- &isolate->serialize_partial_snapshot_cache()[
- isolate->serialize_partial_snapshot_cache_length()]);
-}
-
-
-// When deserializing we need to set the size of the snapshot cache. This means
-// the root iteration code (above) will iterate over array elements, writing the
-// references to deserialized objects in them.
-void SerializerDeserializer::SetSnapshotCacheSize(int size) {
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
+ for (int i = 0; ; i++) {
+ if (isolate->serialize_partial_snapshot_cache_length() <= i) {
+ // Extend the array so it is ready to receive a value from the visitor
+ // when deserializing.
+ isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
+ }
+ Object** cache = isolate->serialize_partial_snapshot_cache();
+ visitor->VisitPointers(&cache[i], &cache[i + 1]);
+ // The sentinel is the undefined object, which is a root and so will not
+ // normally be found in the cache.
+ if (cache[i] == isolate->heap()->undefined_value()) {
+ break;
+ }
+ }
}
@@ -1231,14 +1153,11 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
// then visit the pointer so that it becomes part of the startup snapshot
// and we can refer to it from the partial snapshot.
int length = isolate->serialize_partial_snapshot_cache_length();
- CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
- isolate->serialize_partial_snapshot_cache()[length] = heap_object;
- startup_serializer_->VisitPointer(
- &isolate->serialize_partial_snapshot_cache()[length]);
+ isolate->PushToPartialSnapshotCache(heap_object);
+ startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
// We don't recurse from the startup snapshot generator into the partial
// snapshot generator.
- ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
- isolate->set_serialize_partial_snapshot_cache_length(length + 1);
+ ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
return length;
}
@@ -1273,58 +1192,50 @@ void Serializer::SerializeReferenceToPreviousObject(
int space,
int address,
HowToCode how_to_code,
- WhereToPoint where_to_point) {
+ WhereToPoint where_to_point,
+ int skip) {
int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- // For paged space it is simple to encode back from current allocation if
- // the object is on the same page as the current allocation pointer.
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
- }
- } else if (space == NEW_SPACE) {
- // For new space it is always simple to encode back from current allocation.
- if (offset < address) {
- from_start = false;
- address = offset;
- }
- }
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- if (from_start) {
- sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
- sink_->PutInt(address, "address");
- } else {
+ // Shift out the bits that are always 0.
+ offset >>= kObjectAlignmentBits;
+ if (skip == 0) {
sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
- sink_->PutInt(address, "address");
+ } else {
+ sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+ "BackRefSerWithSkip");
+ sink_->PutInt(skip, "BackRefSkipDistance");
}
+ sink_->PutInt(offset, "offset");
}
void StartupSerializer::SerializeObject(
Object* o,
HowToCode how_to_code,
- WhereToPoint where_to_point) {
+ WhereToPoint where_to_point,
+ int skip) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
int root_index;
if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
- PutRoot(root_index, heap_object, how_to_code, where_to_point);
+ PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
return;
}
if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
+ int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
how_to_code,
- where_to_point);
+ where_to_point,
+ skip);
} else {
+ if (skip != 0) {
+ sink_->Put(kSkip, "FlushPendingSkip");
+ sink_->PutInt(skip, "SkipDistance");
+ }
+
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this,
heap_object,
@@ -1337,32 +1248,41 @@ void StartupSerializer::SerializeObject(
void StartupSerializer::SerializeWeakReferences() {
- for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
- i < Isolate::kPartialSnapshotCacheCapacity;
- i++) {
- sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
- sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
- }
+ // This phase comes right after the partial serialization (of the snapshot).
+ // By then the partial snapshot cache contains the references needed to
+ // decode the partial snapshot. We add one entry with 'undefined', the
+ // sentinel that tells the deserializer it has finished deserializing the
+ // array.
+ Isolate* isolate = Isolate::Current();
+ Object* undefined = isolate->heap()->undefined_value();
+ VisitPointer(&undefined);
HEAP->IterateWeakRoots(this, VISIT_ALL);
+ Pad();
}
void Serializer::PutRoot(int root_index,
HeapObject* object,
SerializerDeserializer::HowToCode how_to_code,
- SerializerDeserializer::WhereToPoint where_to_point) {
+ SerializerDeserializer::WhereToPoint where_to_point,
+ int skip) {
if (how_to_code == kPlain &&
where_to_point == kStartOfObject &&
root_index < kRootArrayNumberOfConstantEncodings &&
!HEAP->InNewSpace(object)) {
- if (root_index < kRootArrayNumberOfLowConstantEncodings) {
- sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
+ if (skip == 0) {
+ sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
+ "RootConstant");
} else {
- sink_->Put(kRootArrayHighConstants + root_index -
- kRootArrayNumberOfLowConstantEncodings,
- "RootHiConstant");
+ sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
+ "RootConstant");
+ sink_->PutInt(skip, "SkipInPutRoot");
}
} else {
+ if (skip != 0) {
+ sink_->Put(kSkip, "SkipFromPutRoot");
+ sink_->PutInt(skip, "SkipFromPutRootDistance");
+ }
sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
sink_->PutInt(root_index, "root_index");
}
@@ -1372,7 +1292,8 @@ void Serializer::PutRoot(int root_index,
void PartialSerializer::SerializeObject(
Object* o,
HowToCode how_to_code,
- WhereToPoint where_to_point) {
+ WhereToPoint where_to_point,
+ int skip) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
@@ -1380,16 +1301,21 @@ void PartialSerializer::SerializeObject(
// The code-caches link to context-specific code objects, which
// the startup and context serializes cannot currently handle.
ASSERT(Map::cast(heap_object)->code_cache() ==
- heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
+ heap_object->GetHeap()->empty_fixed_array());
}
int root_index;
if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
- PutRoot(root_index, heap_object, how_to_code, where_to_point);
+ PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
return;
}
if (ShouldBeInThePartialSnapshotCache(heap_object)) {
+ if (skip != 0) {
+ sink_->Put(kSkip, "SkipFromSerializeObject");
+ sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ }
+
int cache_index = PartialSnapshotCacheIndex(heap_object);
sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
"PartialSnapshotCache");
@@ -1406,13 +1332,18 @@ void PartialSerializer::SerializeObject(
ASSERT(!heap_object->IsSymbol());
if (address_mapper_.IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
+ int space = SpaceOfObject(heap_object);
int address = address_mapper_.MappedTo(heap_object);
SerializeReferenceToPreviousObject(space,
address,
how_to_code,
- where_to_point);
+ where_to_point,
+ skip);
} else {
+ if (skip != 0) {
+ sink_->Put(kSkip, "SkipFromSerializeObject");
+ sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ }
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
heap_object,
@@ -1436,16 +1367,11 @@ void Serializer::ObjectSerializer::Serialize() {
SnapshotPositionEvent(object_->address(), sink_->Position()));
// Mark this object as already serialized.
- bool start_new_page;
- int offset = serializer_->Allocate(space, size, &start_new_page);
+ int offset = serializer_->Allocate(space, size);
serializer_->address_mapper()->AddMapping(object_, offset);
- if (start_new_page) {
- sink_->Put(kNewPage, "NewPage");
- sink_->PutSection(space, "NewPageSpace");
- }
// Serialize the map (first word of the object).
- serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
+ serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
@@ -1486,7 +1412,8 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
}
} else {
- serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
+ serializer_->SerializeObject(
+ current_contents, kPlain, kStartOfObject, 0);
bytes_processed_so_far_ += kPointerSize;
current++;
}
@@ -1498,9 +1425,10 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
Object** current = rinfo->target_object_address();
- OutputRawData(rinfo->target_address_address());
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- serializer_->SerializeObject(*current, representation, kStartOfObject);
+ serializer_->SerializeObject(*current, representation, kStartOfObject, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -1508,10 +1436,12 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
Address* end) {
Address references_start = reinterpret_cast<Address>(start);
- OutputRawData(references_start);
+ int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
for (Address* current = start; current < end; current++) {
sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ skip = 0;
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
}
@@ -1521,12 +1451,13 @@ void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
Address references_start = rinfo->target_address_address();
- OutputRawData(references_start);
+ int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
Address* current = rinfo->target_reference_address();
int representation = rinfo->IsCodedSpecially() ?
kFromCode + kStartOfObject : kPlain + kStartOfObject;
sink_->Put(kExternalReference + representation, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
int reference_id = serializer_->EncodeExternalReference(*current);
sink_->PutInt(reference_id, "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
@@ -1535,7 +1466,7 @@ void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
Address target_start = rinfo->target_address_address();
- OutputRawData(target_start);
+ int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
Address target = rinfo->target_address();
uint32_t encoding = serializer_->EncodeExternalReference(target);
CHECK(target == NULL ? encoding == 0 : encoding != 0);
@@ -1547,6 +1478,7 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
representation = kStartOfObject + kPlain;
}
sink_->Put(kExternalReference + representation, "ExternalReference");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
sink_->PutInt(encoding, "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -1555,25 +1487,27 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Address target_start = rinfo->target_address_address();
- OutputRawData(target_start);
+ int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
+ serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- OutputRawData(entry_address);
- serializer_->SerializeObject(target, kPlain, kFirstInstruction);
+ int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
+ serializer_->SerializeObject(target, kPlain, kInnerPointer, skip);
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
- // We shouldn't have any global property cell references in code
- // objects in the snapshot.
- UNREACHABLE();
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(rinfo->target_cell());
+ int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
+ serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
}
@@ -1601,59 +1535,58 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
}
-void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
+int Serializer::ObjectSerializer::OutputRawData(
+ Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
Address object_start = object_->address();
+ Address base = object_start + bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
- int skipped = up_to_offset - bytes_processed_so_far_;
+ int to_skip = up_to_offset - bytes_processed_so_far_;
+ int bytes_to_output = to_skip;
+ bytes_processed_so_far_ += to_skip;
// This assert will fail if the reloc info gives us target_address_address
// locations in non-ascending order. Luckily that doesn't happen.
- ASSERT(skipped >= 0);
- if (skipped != 0) {
- Address base = object_start + bytes_processed_so_far_;
-#define RAW_CASE(index, length) \
- if (skipped == length) { \
+ ASSERT(to_skip >= 0);
+ bool outputting_code = false;
+ if (to_skip != 0 && code_object_ && !code_has_been_output_) {
+ // Output the code all at once and fix later.
+ bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
+ outputting_code = true;
+ code_has_been_output_ = true;
+ }
+ if (bytes_to_output != 0 &&
+ (!code_object_ || outputting_code)) {
+#define RAW_CASE(index) \
+ if (!outputting_code && bytes_to_output == index * kPointerSize && \
+ index * kPointerSize == to_skip) { \
sink_->PutSection(kRawData + index, "RawDataFixed"); \
+ to_skip = 0; /* This insn already skips. */ \
} else /* NOLINT */
COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
{ /* NOLINT */
+ // We always end up here if we are outputting the code of a code object.
sink_->Put(kRawData, "RawData");
- sink_->PutInt(skipped, "length");
+ sink_->PutInt(bytes_to_output, "length");
}
- for (int i = 0; i < skipped; i++) {
+ for (int i = 0; i < bytes_to_output; i++) {
unsigned int data = base[i];
sink_->PutSection(data, "Byte");
}
- bytes_processed_so_far_ += skipped;
}
-}
-
-
-int Serializer::SpaceOfObject(HeapObject* object) {
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
- if (i == LO_SPACE) {
- if (object->IsCode()) {
- return kLargeCode;
- } else if (object->IsFixedArray()) {
- return kLargeFixedArray;
- } else {
- return kLargeData;
- }
- }
- return i;
- }
+ if (to_skip != 0 && return_skip == kIgnoringReturn) {
+ sink_->Put(kSkip, "Skip");
+ sink_->PutInt(to_skip, "SkipDistance");
+ to_skip = 0;
}
- UNREACHABLE();
- return 0;
+ return to_skip;
}
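The kCanReturnSkipInsteadOfSkipping mode exists so a caller can fold the pending skip into its next opcode rather than spending a standalone kSkip byte. The Visit* methods above all follow the same shape:

    // Shape of the callers above (e.g. VisitExternalReferences), not new code:
    int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
    sink_->PutInt(skip, "SkipB4ExternalRef");  // 0 when nothing was pending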
-int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
+int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
if (HEAP->InSpace(object, s)) {
+ ASSERT(i < kNumberOfSpaces);
return i;
}
}
@@ -1662,34 +1595,8 @@ int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
}
-int Serializer::Allocate(int space, int size, bool* new_page) {
+int Serializer::Allocate(int space, int size) {
CHECK(space >= 0 && space < kNumberOfSpaces);
- if (SpaceIsLarge(space)) {
- // In large object space we merely number the objects instead of trying to
- // determine some sort of address.
- *new_page = true;
- large_object_total_ += size;
- return fullness_[LO_SPACE]++;
- }
- *new_page = false;
- if (fullness_[space] == 0) {
- *new_page = true;
- }
- if (SpaceIsPaged(space)) {
- // Paged spaces are a little special. We encode their addresses as if the
- // pages were all contiguous and each page were filled up in the range
- // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
- // and allocation does not start at offset 0 in the page, but this scheme
- // means the deserializer can get the page number quickly by shifting the
- // serialized address.
- CHECK(IsPowerOf2(Page::kPageSize));
- int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
- CHECK(size <= SpaceAreaSize(space));
- if (used_in_this_page + size > SpaceAreaSize(space)) {
- *new_page = true;
- fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
- }
- }
int allocation_address = fullness_[space];
fullness_[space] = allocation_address + size;
return allocation_address;
@@ -1705,4 +1612,21 @@ int Serializer::SpaceAreaSize(int space) {
}
+void Serializer::Pad() {
+ // The non-branching GetInt will read up to 3 bytes too far, so we need
+ // to pad the snapshot to make sure we don't read over the end.
+ for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
+ sink_->Put(kNop, "Padding");
+ }
+}
+
+
+bool SnapshotByteSource::AtEOF() {
+ if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
+ for (int x = position_; x < length_; x++) {
+ if (data_[x] != SerializerDeserializer::nop()) return false;
+ }
+ return true;
+}
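Pad() and AtEOF() exist because the branch-free decoder over-reads: GetUnalignedInt always fetches four bytes, so the last varint needs up to three bytes of slack behind it. An illustrative end-of-stream layout (byte values follow from the encoding above):

    //  ... payload ... | 0x05 | kNop | kNop | kNop   <- end of snapshot data
    //                    ^ final one-byte varint (value 1)
    // Reading the varint touches all four trailing bytes but stays in
    // bounds; AtEOF() then recognizes the pure-nop tail as "no more data".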
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/serialize.h b/src/3rdparty/v8/src/serialize.h
index f50e23e..2041792 100644
--- a/src/3rdparty/v8/src/serialize.h
+++ b/src/3rdparty/v8/src/serialize.h
@@ -170,13 +170,27 @@ class SnapshotByteSource {
return data_[position_++];
}
+ int32_t GetUnalignedInt() {
+#if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN
+ int32_t answer;
+ ASSERT(position_ + sizeof(answer) <= length_ + 0u);
+ answer = *reinterpret_cast<const int32_t*>(data_ + position_);
+#else
+ int32_t answer = data_[position_];
+ answer |= data_[position_ + 1] << 8;
+ answer |= data_[position_ + 2] << 16;
+ answer |= data_[position_ + 3] << 24;
+#endif
+ return answer;
+ }
+
+ void Advance(int by) { position_ += by; }
+
inline void CopyRaw(byte* to, int number_of_bytes);
inline int GetInt();
- bool AtEOF() {
- return position_ == length_;
- }
+ bool AtEOF();
int position() { return position_; }
@@ -187,48 +201,31 @@ class SnapshotByteSource {
};
-#define COMMON_RAW_LENGTHS(f) \
- f(1, 1) \
- f(2, 2) \
- f(3, 3) \
- f(4, 4) \
- f(5, 5) \
- f(6, 6) \
- f(7, 7) \
- f(8, 8) \
- f(9, 12) \
- f(10, 16) \
- f(11, 20) \
- f(12, 24) \
- f(13, 28) \
- f(14, 32) \
- f(15, 36)
-
// The Serializer/Deserializer class is a common superclass for Serializer and
// Deserializer which is used to store common constants and methods used by
// both.
class SerializerDeserializer: public ObjectVisitor {
public:
static void Iterate(ObjectVisitor* visitor);
- static void SetSnapshotCacheSize(int size);
+
+ static int nop() { return kNop; }
protected:
// Where the pointed-to object can be found:
enum Where {
kNewObject = 0, // Object is next in snapshot.
- // 1-8 One per space.
+ // 1-6 One per space.
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
- kSkip = 0xc, // Skip a pointer sized cell.
- // 0xd-0xf Free.
- kBackref = 0x10, // Object is described relative to end.
- // 0x11-0x18 One per space.
- // 0x19-0x1f Free.
- kFromStart = 0x20, // Object is described relative to start.
- // 0x21-0x28 One per space.
- // 0x29-0x2f Free.
- // 0x30-0x3f Used by misc. tags below.
+ kSkip = 0xc, // Skip n bytes.
+ kNop = 0xd, // Does nothing, used to pad.
+ // 0xe-0xf Free.
+ kBackref = 0x10, // Object is described relative to end.
+ // 0x11-0x16 One per space.
+ kBackrefWithSkip = 0x18, // Object is described relative to end.
+ // 0x19-0x1e One per space.
+ // 0x20-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
@@ -240,17 +237,27 @@ class SerializerDeserializer: public ObjectVisitor {
kHowToCodeMask = 0x40
};
+ // For kRootArrayConstants
+ enum WithSkip {
+ kNoSkipDistance = 0,
+ kHasSkipDistance = 0x40,
+ kWithSkipMask = 0x40
+ };
+
// Where to point within the object.
enum WhereToPoint {
kStartOfObject = 0,
- kFirstInstruction = 0x80,
+ kInnerPointer = 0x80, // First insn in code object or payload of cell.
kWhereToPointMask = 0x80
};
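Taken together, the enums above partition a single dispatch byte. The sketch below pieces the layout together from the masks (the worked value assumes CODE_SPACE == 3):

    // bit 7 (0x80): WhereToPoint -- kStartOfObject / kInnerPointer
    // bit 6 (0x40): HowToCode    -- kPlain / kFromCode (for kRootArrayConstants
    //               bytes this bit instead carries kHasSkipDistance)
    // bits 5..0   : Where -- the opcode; for kNewObject, kBackref and
    //               kBackrefWithSkip the low 3 bits (kSpaceMask == 7) hold
    //               the target space
    //
    // Example: kBackrefWithSkip + kFromCode + kInnerPointer + CODE_SPACE
    //          == 0x18 + 0x40 + 0x80 + 0x03 == 0xdb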
// Misc.
- // Raw data to be copied from the snapshot.
- static const int kRawData = 0x30;
- // Some common raw lengths: 0x31-0x3f
+ // Raw data to be copied from the snapshot. This byte code does not
+ // advance the current pointer; that is used for code objects, where we
+ // write the entire code in one memcpy and then fix things up with kSkip
+ // and other byte codes that overwrite data.
+ static const int kRawData = 0x20;
+ // Some common raw lengths: 0x21-0x3f. These auto-advance the current
+ // pointer.
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
@@ -260,64 +267,44 @@ class SerializerDeserializer: public ObjectVisitor {
// Used for the source code of the natives, which is in the executable, but
// is referred to from external strings in the snapshot.
static const int kNativesStringResource = 0x71;
- static const int kNewPage = 0x72;
- static const int kRepeat = 0x73;
- static const int kConstantRepeat = 0x74;
- // 0x74-0x7f Repeat last word (subtract 0x73 to get the count).
- static const int kMaxRepeats = 0x7f - 0x73;
+ static const int kRepeat = 0x72;
+ static const int kConstantRepeat = 0x73;
+ // 0x73-0x7f Repeat last word (subtract 0x72 to get the count).
+ static const int kMaxRepeats = 0x7f - 0x72;
static int CodeForRepeats(int repeats) {
ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
- return 0x73 + repeats;
+ return 0x72 + repeats;
}
static int RepeatsForCode(int byte_code) {
ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
- return byte_code - 0x73;
+ return byte_code - 0x72;
}
- static const int kRootArrayLowConstants = 0xb0;
- // 0xb0-0xbf Things from the first 16 elements of the root array.
- static const int kRootArrayHighConstants = 0xf0;
- // 0xf0-0xff Things from the next 16 elements of the root array.
+ static const int kRootArrayConstants = 0xa0;
+ // 0xa0-0xbf Things from the first 32 elements of the root array.
static const int kRootArrayNumberOfConstantEncodings = 0x20;
- static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
static int RootArrayConstantFromByteCode(int byte_code) {
- int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
- ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
- return constant;
+ return byte_code & 0x1f;
}
-
- static const int kLargeData = LAST_SPACE;
- static const int kLargeCode = kLargeData + 1;
- static const int kLargeFixedArray = kLargeCode + 1;
- static const int kNumberOfSpaces = kLargeFixedArray + 1;
+ static const int kNumberOfSpaces = LO_SPACE;
static const int kAnyOldSpace = -1;
// A bitmask for getting the space out of an instruction.
- static const int kSpaceMask = 15;
-
- static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
- static inline bool SpaceIsPaged(int space) {
- return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
- }
+ static const int kSpaceMask = 7;
};
int SnapshotByteSource::GetInt() {
- // A little unwind to catch the really small ints.
- int snapshot_byte = Get();
- if ((snapshot_byte & 0x80) == 0) {
- return snapshot_byte;
- }
- int accumulator = (snapshot_byte & 0x7f) << 7;
- while (true) {
- snapshot_byte = Get();
- if ((snapshot_byte & 0x80) == 0) {
- return accumulator | snapshot_byte;
- }
- accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
- }
- UNREACHABLE();
- return accumulator;
+ // This way of encoding variable-length integers does not suffer from
+ // branch mispredictions.
+ uint32_t answer = GetUnalignedInt();
+ int bytes = answer & 3;
+ Advance(bytes);
+ uint32_t mask = 0xffffffffu;
+ mask >>= 32 - (bytes << 3);
+ answer &= mask;
+ answer >>= 2;
+ return answer;
}
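A matching standalone decoder for the PutInt sketch given earlier shows why no branch depends on the length: it only feeds a shift and a mask (again an assumed re-implementation; the buffer needs the 3 pad bytes Serializer::Pad() provides):

    #include <cstdint>

    uint32_t GetInt(const uint8_t* data, int* pos) {
      // Always read 4 bytes; the tag says how many were really used.
      uint32_t raw = static_cast<uint32_t>(data[*pos]) |
                     (static_cast<uint32_t>(data[*pos + 1]) << 8) |
                     (static_cast<uint32_t>(data[*pos + 2]) << 16) |
                     (static_cast<uint32_t>(data[*pos + 3]) << 24);
      int bytes = raw & 3;                  // length tag from the encoder
      *pos += bytes;
      uint32_t mask = 0xffffffffu >> (32 - (bytes << 3));
      return (raw & mask) >> 2;             // drop tag, keep payload
    }
    // Round trip: after PutInt(&buf, 123456) and 3 pad bytes,
    // GetInt(&buf[0], &pos) returns 123456 and advances pos by 3.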
@@ -341,6 +328,12 @@ class Deserializer: public SerializerDeserializer {
// Deserialize a single object and the objects reachable from it.
void DeserializePartial(Object** root);
+ void set_reservation(int space_number, int reservation) {
+ ASSERT(space_number >= 0);
+ ASSERT(space_number <= LAST_SPACE);
+ reservations_[space_number] = reservation;
+ }
+
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -359,28 +352,36 @@ class Deserializer: public SerializerDeserializer {
// the heap.
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
- HeapObject* GetAddressFromStart(int space);
- inline HeapObject* GetAddressFromEnd(int space);
- Address Allocate(int space_number, Space* space, int size);
- void ReadObject(int space_number, Space* space, Object** write_back);
+ void ReadObject(int space_number, Object** write_back);
+
+ // This routine allocates a new object and keeps track of where objects
+ // have been allocated so that we can fix back references when
+ // deserializing.
+ Address Allocate(int space_index, int size) {
+ Address address = high_water_[space_index];
+ high_water_[space_index] = address + size;
+ return address;
+ }
+
+ // This returns the address of an object that has been described in the
+ // snapshot as being offset bytes back in a particular space.
+ HeapObject* GetAddressFromEnd(int space) {
+ int offset = source_->GetInt();
+ offset <<= kObjectAlignmentBits;
+ return HeapObject::FromAddress(high_water_[space] - offset);
+ }
+
// Cached current isolate.
Isolate* isolate_;
- // Keep track of the pages in the paged spaces.
- // (In large object space we are keeping track of individual objects
- // rather than pages.) In new space we just need the address of the
- // first object and the others will flow from that.
- List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
-
SnapshotByteSource* source_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
- // This is the address of the most recent object that was allocated. It
- // is used to set the location of the new page when we encounter a
- // START_NEW_PAGE_SERIALIZATION tag.
- Address last_object_address_;
+
+ int reservations_[LAST_SPACE + 1];
+ static const intptr_t kUninitializedReservation = -1;
ExternalReferenceDecoder* external_reference_decoder_;
@@ -462,7 +463,7 @@ class Serializer : public SerializerDeserializer {
// You can call this after serialization to find out how much space was used
// in each space.
int CurrentAllocationAddress(int space) {
- if (SpaceIsLarge(space)) return large_object_total_;
+ ASSERT(space < kNumberOfSpaces);
return fullness_[space];
}
@@ -479,8 +480,11 @@ class Serializer : public SerializerDeserializer {
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
- void PutRoot(
- int index, HeapObject* object, HowToCode how, WhereToPoint where);
+ void PutRoot(int index,
+ HeapObject* object,
+ HowToCode how,
+ WhereToPoint where,
+ int skip);
protected:
static const int kInvalidRootIndex = -1;
@@ -504,7 +508,9 @@ class Serializer : public SerializerDeserializer {
object_(HeapObject::cast(o)),
sink_(sink),
reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0) { }
+ bytes_processed_so_far_(0),
+ code_object_(o->IsCode()),
+ code_has_been_output_(false) { }
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
@@ -524,34 +530,36 @@ class Serializer : public SerializerDeserializer {
}
private:
- void OutputRawData(Address up_to);
+ enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
+ // This function outputs or skips the raw data between the last pointer
+ // and the current position. It can optionally just return the number of
+ // bytes to skip instead of emitting a skip instruction, in case the skip
+ // can be merged into the next instruction.
+ int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
Serializer* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
int reference_representation_;
int bytes_processed_so_far_;
+ bool code_object_;
+ bool code_has_been_output_;
};
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
- WhereToPoint where_to_point) = 0;
+ WhereToPoint where_to_point,
+ int skip) = 0;
void SerializeReferenceToPreviousObject(
int space,
int address,
HowToCode how_to_code,
- WhereToPoint where_to_point);
+ WhereToPoint where_to_point,
+ int skip);
void InitializeAllocators();
- // This will return the space for an object. If the object is in large
- // object space it may return kLargeCode or kLargeFixedArray in order
- // to indicate to the deserializer what kind of large object allocation
- // to make.
+ // This will return the space for an object.
static int SpaceOfObject(HeapObject* object);
- // This just returns the space of the object. It will return LO_SPACE
- // for all large objects since you can't check the type of the object
- // once the map has been used for the serialization address.
- static int SpaceOfAlreadySerializedObject(HeapObject* object);
- int Allocate(int space, int size, bool* new_page_started);
+ int Allocate(int space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
@@ -560,9 +568,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
- // relative addresses for back references. Large objects are
- // just numbered sequentially since relative addresses make no
- // sense in large object space.
+ // relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
int current_root_index_;
@@ -570,9 +576,9 @@ class Serializer : public SerializerDeserializer {
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
- int large_object_total_;
SerializationAddressMapper address_mapper_;
intptr_t root_index_wave_front_;
+ void Pad();
friend class ObjectSerializer;
friend class Deserializer;
@@ -595,7 +601,8 @@ class PartialSerializer : public Serializer {
virtual void Serialize(Object** o);
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
- WhereToPoint where_to_point);
+ WhereToPoint where_to_point,
+ int skip);
protected:
virtual int PartialSnapshotCacheIndex(HeapObject* o);
@@ -633,11 +640,13 @@ class StartupSerializer : public Serializer {
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
- WhereToPoint where_to_point);
+ WhereToPoint where_to_point,
+ int skip);
void SerializeWeakReferences();
void Serialize() {
SerializeStrongReferences();
SerializeWeakReferences();
+ Pad();
}
private:
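
The serializer hunks above thread a new skip argument through PutRoot and SerializeObject: instead of always emitting a standalone skip instruction for a run of raw bytes, OutputRawData may hand the byte count back so it can be folded into the next instruction. A minimal sketch of that encoding idea — the opcodes and byte sink here are hypothetical, not V8's actual snapshot byte codes:

    #include <vector>

    // Hypothetical opcodes; the real snapshot format defines its own.
    enum Opcode { kSkip = 1, kRootPointer = 2 };

    struct Sink {
      std::vector<unsigned char> bytes;
      void Put(int b) { bytes.push_back(static_cast<unsigned char>(b)); }
      void PutInt(int v) {
        for (int i = 0; i < 4; i++) Put((v >> (8 * i)) & 0xFF);
      }
    };

    enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };

    // Emit a standalone skip instruction, or return the count so the caller
    // can merge it into the next instruction it writes.
    int OutputRawData(Sink* sink, int pending, ReturnSkip return_skip) {
      if (return_skip == kCanReturnSkipInsteadOfSkipping) return pending;
      if (pending > 0) { sink->Put(kSkip); sink->PutInt(pending); }
      return 0;
    }

    // The merged form: one instruction carries both the skip and the root.
    void PutRoot(Sink* sink, int root_index, int skip) {
      sink->Put(kRootPointer);
      sink->PutInt(skip);
      sink->PutInt(root_index);
    }
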
diff --git a/src/3rdparty/v8/src/small-pointer-list.h b/src/3rdparty/v8/src/small-pointer-list.h
index 75fea06..295a06f 100644
--- a/src/3rdparty/v8/src/small-pointer-list.h
+++ b/src/3rdparty/v8/src/small-pointer-list.h
@@ -44,22 +44,22 @@ class SmallPointerList {
public:
SmallPointerList() : data_(kEmptyTag) {}
- explicit SmallPointerList(int capacity) : data_(kEmptyTag) {
- Reserve(capacity);
+ SmallPointerList(int capacity, Zone* zone) : data_(kEmptyTag) {
+ Reserve(capacity, zone);
}
- void Reserve(int capacity) {
+ void Reserve(int capacity, Zone* zone) {
if (capacity < 2) return;
if ((data_ & kTagMask) == kListTag) {
if (list()->capacity() >= capacity) return;
int old_length = list()->length();
- list()->AddBlock(NULL, capacity - list()->capacity());
+ list()->AddBlock(NULL, capacity - list()->capacity(), zone);
list()->Rewind(old_length);
return;
}
- PointerList* list = new PointerList(capacity);
+ PointerList* list = new(zone) PointerList(capacity, zone);
if ((data_ & kTagMask) == kSingletonTag) {
- list->Add(single_value());
+ list->Add(single_value(), zone);
}
ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
data_ = reinterpret_cast<intptr_t>(list) | kListTag;
@@ -83,21 +83,21 @@ class SmallPointerList {
return list()->length();
}
- void Add(T* pointer) {
+ void Add(T* pointer, Zone* zone) {
ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
if ((data_ & kTagMask) == kEmptyTag) {
data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
return;
}
if ((data_ & kTagMask) == kSingletonTag) {
- PointerList* list = new PointerList(2);
- list->Add(single_value());
- list->Add(pointer);
+ PointerList* list = new(zone) PointerList(2, zone);
+ list->Add(single_value(), zone);
+ list->Add(pointer, zone);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
data_ = reinterpret_cast<intptr_t>(list) | kListTag;
return;
}
- list()->Add(pointer);
+ list()->Add(pointer, zone);
}
// Note: returns T* and not T*& (unlike List from list.h).
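
SmallPointerList packs three states into one tagged word — empty, a single pointer, or a pointer to a real list — and the hunk above additionally threads a Zone* into every growing call so the backing list is zone-allocated. A stripped-down sketch of the tagging scheme, with plain heap allocation standing in for the Zone:

    #include <cassert>
    #include <stdint.h>
    #include <vector>

    static const intptr_t kTagMask = 3;
    static const intptr_t kEmptyTag = 1;
    static const intptr_t kSingletonTag = 0;
    static const intptr_t kListTag = 2;

    class SmallList {
     public:
      SmallList() : data_(kEmptyTag) {}
      void Add(int* pointer) {
        assert((reinterpret_cast<intptr_t>(pointer) & kTagMask) == 0);
        if ((data_ & kTagMask) == kEmptyTag) {        // empty -> singleton
          data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
          return;
        }
        if ((data_ & kTagMask) == kSingletonTag) {    // singleton -> list
          std::vector<int*>* list = new std::vector<int*>();
          list->push_back(single_value());
          list->push_back(pointer);
          data_ = reinterpret_cast<intptr_t>(list) | kListTag;
          return;
        }
        list()->push_back(pointer);                   // already a list
      }
     private:
      int* single_value() { return reinterpret_cast<int*>(data_); }
      std::vector<int*>* list() {
        return reinterpret_cast<std::vector<int*>*>(data_ & ~kTagMask);
      }
      intptr_t data_;  // leaks the vector; a Zone frees everything at once
    };

The lack of a destructor is the point of the change: zone memory is released wholesale, which is what motivates passing the Zone explicitly instead of deleting the list.
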
diff --git a/src/3rdparty/v8/src/smart-array-pointer.h b/src/3rdparty/v8/src/smart-pointers.h
index 00721c1..345c4d4 100644
--- a/src/3rdparty/v8/src/smart-array-pointer.h
+++ b/src/3rdparty/v8/src/smart-pointers.h
@@ -25,34 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_SMART_ARRAY_POINTER_H_
-#define V8_SMART_ARRAY_POINTER_H_
+#ifndef V8_SMART_POINTERS_H_
+#define V8_SMART_POINTERS_H_
namespace v8 {
namespace internal {
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-template<typename T>
-class SmartArrayPointer {
+template<typename Deallocator, typename T>
+class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
- inline SmartArrayPointer() : p_(NULL) {}
+ inline SmartPointerBase() : p_(NULL) {}
// Constructs a scoped pointer from a plain one.
- explicit inline SmartArrayPointer(T* ptr) : p_(ptr) {}
+ explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- inline SmartArrayPointer(const SmartArrayPointer<T>& rhs) : p_(rhs.p_) {
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+ : p_(rhs.p_) {
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
// When the destructor of the scoped pointer is executed the plain pointer
// is deleted using DeleteArray. This implies that you must allocate with
// NewArray.
- inline ~SmartArrayPointer() { if (p_) DeleteArray(p_); }
+ inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
inline T* operator->() const { return p_; }
@@ -81,10 +80,11 @@ class SmartArrayPointer {
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
- inline SmartArrayPointer& operator=(const SmartArrayPointer<T>& rhs) {
+ inline SmartPointerBase<Deallocator, T>& operator=(
+ const SmartPointerBase<Deallocator, T>& rhs) {
ASSERT(is_empty());
T* tmp = rhs.p_; // swap to handle self-assignment
- const_cast<SmartArrayPointer<T>&>(rhs).p_ = NULL;
+ const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
p_ = tmp;
return *this;
}
@@ -95,6 +95,45 @@ class SmartArrayPointer {
T* p_;
};
+// A 'scoped array pointer' that calls DeleteArray on its pointer when the
+// destructor is called.
+
+template<typename T>
+struct ArrayDeallocator {
+ static void Delete(T* array) {
+ DeleteArray(array);
+ }
+};
+
+
+template<typename T>
+class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
+ public:
+ inline SmartArrayPointer() { }
+ explicit inline SmartArrayPointer(T* ptr)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
+ inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+ : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
+};
+
+
+template<typename T>
+struct ObjectDeallocator {
+ static void Delete(T* array) {
+ Malloced::Delete(array);
+ }
+};
+
+template<typename T>
+class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
+ public:
+ inline SmartPointer() { }
+ explicit inline SmartPointer(T* ptr)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
+ inline SmartPointer(const SmartPointer<T>& rhs)
+ : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
+};
+
} } // namespace v8::internal
-#endif // V8_SMART_ARRAY_POINTER_H_
+#endif // V8_SMART_POINTERS_H_
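
The renamed header generalizes the old SmartArrayPointer into SmartPointerBase<Deallocator, T>, so array and Malloced deallocation share one implementation. The same policy pattern in standalone form, using the standard delete operators in place of V8's DeleteArray and Malloced::Delete:

    #include <cstdio>

    template <typename Deallocator, typename T>
    class ScopedBase {
     public:
      explicit ScopedBase(T* ptr = NULL) : p_(ptr) {}
      ~ScopedBase() { if (p_) Deallocator::Delete(p_); }
      T* get() const { return p_; }
     private:
      ScopedBase(const ScopedBase&);        // ownership transfer elided here
      void operator=(const ScopedBase&);
      T* p_;
    };

    template <typename T>
    struct ArrayDelete { static void Delete(T* p) { delete[] p; } };

    template <typename T>
    struct SingleDelete { static void Delete(T* p) { delete p; } };

    int main() {
      ScopedBase<ArrayDelete<char>, char> buffer(new char[16]);  // delete[]
      ScopedBase<SingleDelete<int>, int> value(new int(42));     // delete
      std::printf("%d\n", *value.get());
      return 0;
    }
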
diff --git a/src/3rdparty/v8/src/snapshot-common.cc b/src/3rdparty/v8/src/snapshot-common.cc
index ef89a5e..a8806f0 100644
--- a/src/3rdparty/v8/src/snapshot-common.cc
+++ b/src/3rdparty/v8/src/snapshot-common.cc
@@ -37,10 +37,47 @@
namespace v8 {
namespace internal {
-bool Snapshot::Deserialize(const byte* content, int len) {
- SnapshotByteSource source(content, len);
- Deserializer deserializer(&source);
- return V8::Initialize(&deserializer);
+
+static void ReserveSpaceForSnapshot(Deserializer* deserializer,
+ const char* file_name) {
+ int file_name_length = StrLength(file_name) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name);
+ FILE* fp = OS::FOpen(name.start(), "r");
+ CHECK_NE(NULL, fp);
+ int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+#ifdef _MSC_VER
+ // Avoid warning about unsafe fscanf from MSVC.
+ // Please note that this is only fine if %c and %s are not being used.
+#define fscanf fscanf_s
+#endif
+ CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
+ CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
+ CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
+ CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
+ CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
+ CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+#ifdef _MSC_VER
+#undef fscanf
+#endif
+ fclose(fp);
+ deserializer->set_reservation(NEW_SPACE, new_size);
+ deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
+ deserializer->set_reservation(OLD_DATA_SPACE, data_size);
+ deserializer->set_reservation(CODE_SPACE, code_size);
+ deserializer->set_reservation(MAP_SPACE, map_size);
+ deserializer->set_reservation(CELL_SPACE, cell_size);
+ name.Dispose();
+}
+
+
+void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
+ deserializer->set_reservation(NEW_SPACE, new_space_used_);
+ deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
+ deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
+ deserializer->set_reservation(CODE_SPACE, code_space_used_);
+ deserializer->set_reservation(MAP_SPACE, map_space_used_);
+ deserializer->set_reservation(CELL_SPACE, cell_space_used_);
}
@@ -49,32 +86,44 @@ bool Snapshot::Initialize(const char* snapshot_file) {
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
- Deserialize(str, len);
+ bool success;
+ {
+ SnapshotByteSource source(str, len);
+ Deserializer deserializer(&source);
+ ReserveSpaceForSnapshot(&deserializer, snapshot_file);
+ success = V8::Initialize(&deserializer);
+ }
DeleteArray(str);
- return true;
+ return success;
} else if (size_ > 0) {
- Deserialize(raw_data_, raw_size_);
- return true;
+ SnapshotByteSource source(raw_data_, raw_size_);
+ Deserializer deserializer(&source);
+ ReserveSpaceForLinkedInSnapshot(&deserializer);
+ return V8::Initialize(&deserializer);
}
return false;
}
+bool Snapshot::HaveASnapshotToStartFrom() {
+ return size_ != 0;
+}
+
+
Handle<Context> Snapshot::NewContextFromSnapshot() {
if (context_size_ == 0) {
return Handle<Context>();
}
- HEAP->ReserveSpace(new_space_used_,
- pointer_space_used_,
- data_space_used_,
- code_space_used_,
- map_space_used_,
- cell_space_used_,
- large_space_used_);
SnapshotByteSource source(context_raw_data_,
context_raw_size_);
Deserializer deserializer(&source);
Object* root;
+ deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
+ deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
+ deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
+ deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
+ deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
+ deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.DeserializePartial(&root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
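
Reservation sizes now come from a sidecar text file named <snapshot>.size rather than from a ReserveSpace call at deserialization time. Below is a sketch of a writer for that file; the field names, their order, and the ".size" suffix are taken from the fscanf calls above, everything else is illustrative:

    #include <cstdio>
    #include <string>

    bool WriteSnapshotSizeFile(const std::string& snapshot_file,
                               int new_size, int pointer_size, int data_size,
                               int code_size, int map_size, int cell_size) {
      std::string name = snapshot_file + ".size";
      FILE* fp = std::fopen(name.c_str(), "w");
      if (fp == NULL) return false;
      std::fprintf(fp, "new %d\n", new_size);
      std::fprintf(fp, "pointer %d\n", pointer_size);
      std::fprintf(fp, "data %d\n", data_size);
      std::fprintf(fp, "code %d\n", code_size);
      std::fprintf(fp, "map %d\n", map_size);
      std::fprintf(fp, "cell %d\n", cell_size);
      std::fclose(fp);
      return true;
    }
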
diff --git a/src/3rdparty/v8/src/snapshot-empty.cc b/src/3rdparty/v8/src/snapshot-empty.cc
index 0b35720..70e7ab8 100644
--- a/src/3rdparty/v8/src/snapshot-empty.cc
+++ b/src/3rdparty/v8/src/snapshot-empty.cc
@@ -49,6 +49,12 @@ const int Snapshot::data_space_used_ = 0;
const int Snapshot::code_space_used_ = 0;
const int Snapshot::map_space_used_ = 0;
const int Snapshot::cell_space_used_ = 0;
-const int Snapshot::large_space_used_ = 0;
+
+const int Snapshot::context_new_space_used_ = 0;
+const int Snapshot::context_pointer_space_used_ = 0;
+const int Snapshot::context_data_space_used_ = 0;
+const int Snapshot::context_code_space_used_ = 0;
+const int Snapshot::context_map_space_used_ = 0;
+const int Snapshot::context_cell_space_used_ = 0;
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/snapshot.h b/src/3rdparty/v8/src/snapshot.h
index 4f01a2d..c4ae45e 100644
--- a/src/3rdparty/v8/src/snapshot.h
+++ b/src/3rdparty/v8/src/snapshot.h
@@ -40,6 +40,8 @@ class Snapshot {
// could be found.
static bool Initialize(const char* snapshot_file = NULL);
+ static bool HaveASnapshotToStartFrom();
+
// Create a new context using the internal partial snapshot.
static Handle<Context> NewContextFromSnapshot();
@@ -75,13 +77,18 @@ class Snapshot {
static const int code_space_used_;
static const int map_space_used_;
static const int cell_space_used_;
- static const int large_space_used_;
+ static const int context_new_space_used_;
+ static const int context_pointer_space_used_;
+ static const int context_data_space_used_;
+ static const int context_code_space_used_;
+ static const int context_map_space_used_;
+ static const int context_cell_space_used_;
static const int size_;
static const int raw_size_;
static const int context_size_;
static const int context_raw_size_;
- static bool Deserialize(const byte* content, int len);
+ static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
diff --git a/src/3rdparty/v8/src/spaces-inl.h b/src/3rdparty/v8/src/spaces-inl.h
index ed78fc7..8a576a8 100644
--- a/src/3rdparty/v8/src/spaces-inl.h
+++ b/src/3rdparty/v8/src/spaces-inl.h
@@ -214,6 +214,19 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
}
+void MemoryChunk::UpdateHighWaterMark(Address mark) {
+ if (mark == NULL) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ int new_mark = static_cast<int>(mark - chunk->address());
+ if (new_mark > chunk->high_water_mark_) {
+ chunk->high_water_mark_ = new_mark;
+ }
+}
+
+
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldPointerState),
old_pointer_iterator_(heap->old_pointer_space()),
@@ -269,6 +282,10 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
return object;
}
+ ASSERT(!heap()->linear_allocation() ||
+ (anchor_.next_chunk() == &anchor_ &&
+ anchor_.prev_chunk() == &anchor_));
+
object = free_list_.Allocate(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
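
UpdateHighWaterMark keeps, per chunk, the largest allocation offset ever observed; CommittedPhysicalMemory later sums these marks as an approximation of memory the OS really committed. A self-contained sketch of the same bookkeeping, assuming power-of-two-aligned chunks like V8 pages:

    #include <stdint.h>

    static const uintptr_t kChunkSize = 1 << 20;     // assumed alignment
    static const uintptr_t kAlignmentMask = kChunkSize - 1;

    struct Chunk {
      int high_water_mark;                           // bytes used from base
      static Chunk* FromAddress(uintptr_t addr) {
        return reinterpret_cast<Chunk*>(addr & ~kAlignmentMask);
      }
    };

    void UpdateHighWaterMark(uintptr_t top) {
      if (top == 0) return;
      // Subtract one: a full chunk's top is the first address of the next
      // chunk and would map to the wrong header.
      Chunk* chunk = Chunk::FromAddress(top - 1);
      int mark = static_cast<int>(top - reinterpret_cast<uintptr_t>(chunk));
      if (mark > chunk->high_water_mark) chunk->high_water_mark = mark;
    }
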
diff --git a/src/3rdparty/v8/src/spaces.cc b/src/3rdparty/v8/src/spaces.cc
index a0c8f2c..583b2ca 100644
--- a/src/3rdparty/v8/src/spaces.cc
+++ b/src/3rdparty/v8/src/spaces.cc
@@ -447,6 +447,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
+ chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+ chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
@@ -496,6 +498,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
VirtualMemory reservation;
Address area_start = NULL;
Address area_end = NULL;
+
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
OS::CommitPageSize()) + CodePageGuardSize();
@@ -528,10 +531,11 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
size_executable_ += reservation.size();
}
-#ifdef DEBUG
- ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, CodePageGuardStartOffset());
+ ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ }
+
area_start = base + CodePageAreaStartOffset();
area_end = area_start + body_size;
} else {
@@ -543,9 +547,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (base == NULL) return NULL;
-#ifdef DEBUG
- ZapBlock(base, chunk_size);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, chunk_size);
+ }
area_start = base + Page::kObjectStartOffset;
area_end = base + chunk_size;
@@ -621,9 +625,11 @@ bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
-#ifdef DEBUG
- ZapBlock(start, size);
-#endif
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size);
+ }
+
isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
@@ -819,6 +825,18 @@ void PagedSpace::TearDown() {
}
+size_t PagedSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ size_t size = 0;
+ PageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
@@ -881,10 +899,10 @@ intptr_t PagedSpace::SizeOfFirstPage() {
size = 192 * KB;
break;
case MAP_SPACE:
- size = 128 * KB;
+ size = 16 * kPointerSize * KB;
break;
case CELL_SPACE:
- size = 96 * KB;
+ size = 16 * kPointerSize * KB;
break;
case CODE_SPACE:
if (kPointerSize == 8) {
@@ -984,8 +1002,7 @@ void PagedSpace::ReleaseAllUnusedPages() {
void PagedSpace::Print() { }
#endif
-
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
if (was_swept_conservatively_) return;
@@ -995,23 +1012,23 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
- ASSERT(page->owner() == this);
+ CHECK(page->owner() == this);
if (page == Page::FromAllocationTop(allocation_info_.top)) {
allocation_pointer_found_in_space = true;
}
- ASSERT(page->WasSweptPrecisely());
+ CHECK(page->WasSweptPrecisely());
HeapObjectIterator it(page, NULL);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- ASSERT(end_of_previous_object <= object->address());
+ CHECK(end_of_previous_object <= object->address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1026,15 +1043,14 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
black_size += size;
}
- ASSERT(object->address() + size <= top);
+ CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- ASSERT_LE(black_size, page->LiveBytes());
+ CHECK_LE(black_size, page->LiveBytes());
}
- ASSERT(allocation_pointer_found_in_space);
+ CHECK(allocation_pointer_found_in_space);
}
-#endif
-
+#endif // VERIFY_HEAP
// -----------------------------------------------------------------------------
// NewSpace implementation
@@ -1172,6 +1188,7 @@ void NewSpace::Shrink() {
void NewSpace::UpdateAllocationInfo() {
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
allocation_info_.top = to_space_.page_low();
allocation_info_.limit = to_space_.page_high();
@@ -1258,7 +1275,7 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
@@ -1307,8 +1324,8 @@ void NewSpace::Verify() {
}
// Check semi-spaces.
- ASSERT_EQ(from_space_.id(), kFromSpace);
- ASSERT_EQ(to_space_.id(), kToSpace);
+ CHECK_EQ(from_space_.id(), kFromSpace);
+ CHECK_EQ(to_space_.id(), kToSpace);
from_space_.Verify();
to_space_.Verify();
}
@@ -1384,6 +1401,17 @@ bool SemiSpace::Uncommit() {
}
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
@@ -1524,8 +1552,9 @@ void SemiSpace::set_age_mark(Address mark) {
#ifdef DEBUG
void SemiSpace::Print() { }
+#endif
-
+#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
NewSpacePage* page = anchor_.next_page();
@@ -1555,8 +1584,9 @@ void SemiSpace::Verify() {
page = page->next_page();
}
}
+#endif
-
+#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Addresses belong to same semi-space
NewSpacePage* page = NewSpacePage::FromLimit(start);
@@ -1816,6 +1846,17 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
+
+size_t NewSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ size_t size = to_space_.CommittedPhysicalMemory();
+ if (from_space_.is_committed()) {
+ size += from_space_.CommittedPhysicalMemory();
+ }
+ return size;
+}
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -2027,15 +2068,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ size_in_bytes - old_linear_size);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+ reinterpret_cast<Object**>(new_node->address())[i] =
+ Smi::FromInt(kCodeZapValue);
}
#endif
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
-
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
@@ -2257,11 +2299,40 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
Free(top(), old_linear_size);
SetTop(new_area->address(), new_area->address() + size_in_bytes);
- Allocate(size_in_bytes);
return true;
}
+static void RepairFreeList(Heap* heap, FreeListNode* n) {
+ while (n != NULL) {
+ Map** map_location = reinterpret_cast<Map**>(n->address());
+ if (*map_location == NULL) {
+ *map_location = heap->free_space_map();
+ } else {
+ ASSERT(*map_location == heap->free_space_map());
+ }
+ n = n->next();
+ }
+}
+
+
+void FreeList::RepairLists(Heap* heap) {
+ RepairFreeList(heap, small_list_);
+ RepairFreeList(heap, medium_list_);
+ RepairFreeList(heap, large_list_);
+ RepairFreeList(heap, huge_list_);
+}
+
+
+// After we have booted, we have created a map which represents free space
+// on the heap. If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// fix them.
+void PagedSpace::RepairFreeListsAfterBoot() {
+ free_list_.RepairLists(heap());
+}
+
+
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
@@ -2520,25 +2591,27 @@ void FixedSpace::PrepareForMarkCompact() {
// -----------------------------------------------------------------------------
// MapSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap() || object->IsFreeSpace());
}
-#endif
// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
-#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
- ASSERT(object->IsJSGlobalPropertyCell() ||
+ CHECK(object->IsJSGlobalPropertyCell() ||
object->map() == heap()->two_pointer_filler_map());
}
-#endif
// -----------------------------------------------------------------------------
@@ -2648,18 +2721,31 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
HeapObject* object = page->GetObject();
-#ifdef DEBUG
- // Make the object consistent so the heap can be verified in OldSpaceStep.
- reinterpret_cast<Object**>(object->address())[0] =
- heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif
+ if (Heap::ShouldZapGarbage()) {
+ // Make the object consistent so the heap can be verified in OldSpaceStep.
+ // We only need to do this in debug builds or if verify_heap is on.
+ reinterpret_cast<Object**>(object->address())[0] =
+ heap()->fixed_array_map();
+ reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+ }
heap()->incremental_marking()->OldSpaceStep(object_size);
return object;
}
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+ if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ size_t size = 0;
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ size += current->CommittedPhysicalMemory();
+ current = current->next_page();
+ }
+ return size;
+}
+
+
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
@@ -2752,7 +2838,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
@@ -2763,18 +2849,18 @@ void LargeObjectSpace::Verify() {
// object area start.
HeapObject* object = chunk->GetObject();
Page* page = Page::FromAddress(object->address());
- ASSERT(object->address() == page->area_start());
+ CHECK(object->address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space.
Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
+ CHECK(map->IsMap());
+ CHECK(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, and byte arrays in large object space.
- ASSERT(object->IsCode() || object->IsSeqString() ||
+ CHECK(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
object->IsFixedDoubleArray() || object->IsByteArray());
@@ -2793,15 +2879,17 @@ void LargeObjectSpace::Verify() {
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(heap()->Contains(element_object));
- ASSERT(element_object->map()->IsMap());
+ CHECK(heap()->Contains(element_object));
+ CHECK(element_object->map()->IsMap());
}
}
}
}
}
+#endif
+#ifdef DEBUG
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
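
The RepairLists/RepairFreeListsAfterBoot pair added above deals with a bootstrapping chicken-and-egg: free-list nodes created before free_space_map itself exists carry a NULL map word, so after boot each size class is walked and the map written in. A toy version of that fix-up:

    #include <cassert>
    #include <cstddef>

    struct Map;                    // stands in for the heap's free_space_map

    struct FreeListNode {
      Map* map;                    // NULL if created before the map existed
      FreeListNode* next;
    };

    void RepairFreeList(Map* free_space_map, FreeListNode* n) {
      while (n != NULL) {
        if (n->map == NULL) {
          n->map = free_space_map;  // write in the map retroactively
        } else {
          assert(n->map == free_space_map);
        }
        n = n->next;
      }
    }
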
diff --git a/src/3rdparty/v8/src/spaces.h b/src/3rdparty/v8/src/spaces.h
index b0ecc5d..9121e9c 100644
--- a/src/3rdparty/v8/src/spaces.h
+++ b/src/3rdparty/v8/src/spaces.h
@@ -100,9 +100,6 @@ class Isolate;
#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-#define ASSERT_MAP_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
-
#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
@@ -284,7 +281,9 @@ class Bitmap {
bool IsClean() {
for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) return false;
+ if (cells()[i] != 0) {
+ return false;
+ }
}
return true;
}
@@ -373,6 +372,11 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
+ // Every n write barrier invocations we go to the runtime even though
+ // we could have handled the barrier in generated code. This lets us check
+ // whether we have hit the limit and should do some more marking.
+ static const int kWriteBarrierCounterGranularity = 500;
+
enum MemoryChunkFlags {
IS_EXECUTABLE,
ABOUT_TO_BE_FREED,
@@ -468,6 +472,15 @@ class MemoryChunk {
return live_byte_count_;
}
+ int write_barrier_counter() {
+ return static_cast<int>(write_barrier_counter_);
+ }
+
+ void set_write_barrier_counter(int counter) {
+ write_barrier_counter_ = counter;
+ }
+
+
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
}
@@ -488,11 +501,14 @@ class MemoryChunk {
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
- static const size_t kHeaderSize =
+ static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
+ static const size_t kHeaderSize =
+ kWriteBarrierCounterOffset + kPointerSize + kPointerSize;
+
static const int kBodyOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+ CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
// The start offset of the object area in a page. Aligned to both maps and
// code alignment to be suitable for both. Also aligned to 32 words because
@@ -601,6 +617,13 @@ class MemoryChunk {
return static_cast<int>(area_end() - area_start());
}
+ // Approximate amount of physical memory committed for this chunk.
+ size_t CommittedPhysicalMemory() {
+ return high_water_mark_;
+ }
+
+ static inline void UpdateHighWaterMark(Address mark);
+
protected:
MemoryChunk* next_chunk_;
MemoryChunk* prev_chunk_;
@@ -625,6 +648,10 @@ class MemoryChunk {
int live_byte_count_;
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
+ intptr_t write_barrier_counter_;
+ // Assuming the initial allocation on a page is sequential,
+ // count the highest number of bytes ever allocated on the page.
+ int high_water_mark_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
@@ -790,14 +817,6 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
- // After calling this we can allocate a certain number of bytes using only
- // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
- // without using freelists or causing a GC. This is used by partial
- // snapshots. It returns true if space was reserved or false if a GC is
- // needed. For paged spaces the space requested must include the space wasted
- // at the end of each page when allocating linearly.
- virtual bool ReserveSpace(int bytes) = 0;
-
private:
Heap* heap_;
AllocationSpace id_;
@@ -1318,6 +1337,11 @@ class FreeListNode: public HeapObject {
inline void Zap();
+ static inline FreeListNode* cast(MaybeObject* maybe) {
+ ASSERT(!maybe->IsFailure());
+ return reinterpret_cast<FreeListNode*>(maybe);
+ }
+
private:
static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
@@ -1380,6 +1404,9 @@ class FreeList BASE_EMBEDDED {
bool IsVeryLong();
#endif
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
@@ -1460,6 +1487,10 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
+ // During boot the free_space_map is created, and afterwards we may need
+ // to write it into the free list nodes that were already created.
+ virtual void RepairFreeListsAfterBoot();
+
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
@@ -1470,6 +1501,9 @@ class PagedSpace : public Space {
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -1530,6 +1564,7 @@ class PagedSpace : public Space {
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
allocation_info_.top = top;
allocation_info_.limit = limit;
}
@@ -1551,19 +1586,21 @@ class PagedSpace : public Space {
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
-#ifdef DEBUG
- // Print meta info and objects in this space.
- virtual void Print();
-
+#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
- // Reports statistics for the space
- void ReportStatistics();
-
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
+#endif
+
+#ifdef DEBUG
+ // Print meta info and objects in this space.
+ virtual void Print();
+
+ // Reports statistics for the space
+ void ReportStatistics();
// Report code object related statistics
void CollectCodeStatistics();
@@ -1911,9 +1948,12 @@ class SemiSpace : public Space {
NewSpacePage* first_page() { return anchor_.next_page(); }
NewSpacePage* current_page() { return current_page_; }
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
+
#ifdef DEBUG
virtual void Print();
- virtual void Verify();
  // Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
@@ -1936,6 +1976,9 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
private:
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
@@ -2133,6 +2176,9 @@ class NewSpace : public Space {
return Capacity();
}
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
// Return the available bytes without growing.
intptr_t Available() {
return Capacity() - Size();
@@ -2238,9 +2284,12 @@ class NewSpace : public Space {
template <typename StringType>
inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
+#endif
+
+#ifdef DEBUG
// Print the active semispace.
virtual void Print() { to_space_.Print(); }
#endif
@@ -2410,9 +2459,7 @@ class MapSpace : public FixedSpace {
}
protected:
-#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
-#endif
private:
static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
@@ -2448,9 +2495,7 @@ class CellSpace : public FixedSpace {
}
protected:
-#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
-#endif
public:
TRACK_MEMORY("CellSpace")
@@ -2496,6 +2541,13 @@ class LargeObjectSpace : public Space {
return objects_size_;
}
+ intptr_t CommittedMemory() {
+ return Size();
+ }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
int PageCount() {
return page_count_;
}
@@ -2525,8 +2577,11 @@ class LargeObjectSpace : public Space {
LargePage* first_page() { return first_page_; }
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
virtual void Verify();
+#endif
+
+#ifdef DEBUG
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
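
The new write_barrier_counter_ implements the budget described by kWriteBarrierCounterGranularity: generated code services most barriers itself and only calls into the runtime every 500th invocation so incremental marking can check its limits. The counting pattern on its own, with a stub standing in for the runtime-side check:

    static const int kWriteBarrierCounterGranularity = 500;

    // Stand-in for the runtime check of incremental-marking progress.
    static void CheckMarkingLimit() { /* slow path */ }

    struct ChunkCounter {
      int write_barrier_counter;
      ChunkCounter() : write_barrier_counter(kWriteBarrierCounterGranularity) {}

      // Fast path per recorded write: one decrement and compare.
      void RecordWrite() {
        if (--write_barrier_counter <= 0) {
          CheckMarkingLimit();                  // every 500th write
          write_barrier_counter = kWriteBarrierCounterGranularity;
        }
      }
    };
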
diff --git a/src/3rdparty/v8/src/splay-tree-inl.h b/src/3rdparty/v8/src/splay-tree-inl.h
index 4640ed5..4eca71d 100644
--- a/src/3rdparty/v8/src/splay-tree-inl.h
+++ b/src/3rdparty/v8/src/splay-tree-inl.h
@@ -42,10 +42,11 @@ SplayTree<Config, Allocator>::~SplayTree() {
template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
+bool SplayTree<Config, Allocator>::Insert(const Key& key,
+ Locator* locator) {
if (is_empty()) {
// If the tree is empty, insert the new node.
- root_ = new Node(key, Config::NoValue());
+ root_ = new(allocator_) Node(key, Config::NoValue());
} else {
// Splay on the key to move the last node on the search path
// for the key to the root of the tree.
@@ -57,7 +58,7 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
return false;
}
// Insert the new node.
- Node* node = new Node(key, Config::NoValue());
+ Node* node = new(allocator_) Node(key, Config::NoValue());
InsertInternal(cmp, node);
}
locator->bind(root_);
@@ -293,13 +294,13 @@ void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
// Pre-allocate some space for tiny trees.
- List<Node*, Allocator> nodes_to_visit(10);
- if (root_ != NULL) nodes_to_visit.Add(root_);
+ List<Node*, Allocator> nodes_to_visit(10, allocator_);
+ if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
int pos = 0;
while (pos < nodes_to_visit.length()) {
Node* node = nodes_to_visit[pos++];
- if (node->left() != NULL) nodes_to_visit.Add(node->left());
- if (node->right() != NULL) nodes_to_visit.Add(node->right());
+ if (node->left() != NULL) nodes_to_visit.Add(node->left(), allocator_);
+ if (node->right() != NULL) nodes_to_visit.Add(node->right(), allocator_);
callback->Call(node);
}
}
diff --git a/src/3rdparty/v8/src/splay-tree.h b/src/3rdparty/v8/src/splay-tree.h
index 72231e4..8844d8a 100644
--- a/src/3rdparty/v8/src/splay-tree.h
+++ b/src/3rdparty/v8/src/splay-tree.h
@@ -50,7 +50,7 @@ namespace internal {
// Forward defined as
// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
// class SplayTree;
-template <typename Config, class Allocator>
+template <typename Config, class AllocationPolicy>
class SplayTree {
public:
typedef typename Config::Key Key;
@@ -58,13 +58,21 @@ class SplayTree {
class Locator;
- SplayTree() : root_(NULL) { }
+ SplayTree(AllocationPolicy allocator = AllocationPolicy())
+ : root_(NULL), allocator_(allocator) { }
~SplayTree();
- INLINE(void* operator new(size_t size)) {
- return Allocator::New(static_cast<int>(size));
+ INLINE(void* operator new(size_t size,
+ AllocationPolicy allocator = AllocationPolicy())) {
+ return allocator.New(static_cast<int>(size));
+ }
+ INLINE(void operator delete(void* p)) {
+ AllocationPolicy::Delete(p);
+ }
+ // Please the MSVC compiler. We should never have to execute this.
+ INLINE(void operator delete(void* p, AllocationPolicy policy)) {
+ UNREACHABLE();
}
- INLINE(void operator delete(void* p, size_t)) { return Allocator::Delete(p); }
// Inserts the given key in this tree with the given value. Returns
// true if a node was inserted, otherwise false. If found the locator
@@ -112,11 +120,16 @@ class SplayTree {
left_(NULL),
right_(NULL) { }
- INLINE(void* operator new(size_t size)) {
- return Allocator::New(static_cast<int>(size));
+ INLINE(void* operator new(size_t size, AllocationPolicy allocator)) {
+ return allocator.New(static_cast<int>(size));
+ }
+ INLINE(void operator delete(void* p)) {
+ return AllocationPolicy::Delete(p);
}
- INLINE(void operator delete(void* p, size_t)) {
- return Allocator::Delete(p);
+ // Please the MSVC compiler. We should never have to execute
+ // this.
+ INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
+ UNREACHABLE();
}
Key key() { return key_; }
@@ -184,7 +197,7 @@ class SplayTree {
class NodeDeleter BASE_EMBEDDED {
public:
NodeDeleter() { }
- void Call(Node* node) { delete node; }
+ void Call(Node* node) { AllocationPolicy::Delete(node); }
private:
DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
@@ -194,6 +207,7 @@ class SplayTree {
void ForEachNode(Callback* callback);
Node* root_;
+ AllocationPolicy allocator_;
DISALLOW_COPY_AND_ASSIGN(SplayTree);
};
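
Both splay-tree files make the same move: a class-level Allocator becomes a per-instance AllocationPolicy, nodes and trees are created with new(allocator), and the extra placement operator delete exists only so MSVC has a matching overload if a constructor throws. The pattern in isolation, with malloc standing in for V8's zone allocation policy:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct MallocPolicy {              // V8 uses ZoneAllocationPolicy here
      void* New(size_t size) { return std::malloc(size); }
      static void Delete(void* p) { std::free(p); }
    };

    struct Node {
      explicit Node(int k) : key(k), left(NULL), right(NULL) {}
      int key;
      Node* left;
      Node* right;
      void* operator new(size_t size, MallocPolicy allocator) {
        return allocator.New(size);
      }
      // Matching placement delete; only reached if the constructor throws.
      void operator delete(void* p, MallocPolicy) { MallocPolicy::Delete(p); }
      void operator delete(void* p) { MallocPolicy::Delete(p); }
    };

    int main() {
      MallocPolicy allocator;
      Node* n = new (allocator) Node(7);  // the new(allocator_) call above
      delete n;
      return 0;
    }
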
diff --git a/src/3rdparty/v8/src/store-buffer.cc b/src/3rdparty/v8/src/store-buffer.cc
index 3852155..66488ae 100644
--- a/src/3rdparty/v8/src/store-buffer.cc
+++ b/src/3rdparty/v8/src/store-buffer.cc
@@ -372,7 +372,7 @@ void StoreBuffer::GCPrologue() {
}
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
// Do nothing.
}
@@ -415,7 +415,7 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
void StoreBuffer::Verify() {
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
VerifyPointers(heap_->old_pointer_space(),
&StoreBuffer::FindPointersToNewSpaceInRegion);
VerifyPointers(heap_->map_space(),
@@ -427,9 +427,11 @@ void StoreBuffer::Verify() {
void StoreBuffer::GCEpilogue() {
during_gc_ = false;
+#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
+#endif
}
diff --git a/src/3rdparty/v8/src/store-buffer.h b/src/3rdparty/v8/src/store-buffer.h
index 951a9ca..0ade8ce 100644
--- a/src/3rdparty/v8/src/store-buffer.h
+++ b/src/3rdparty/v8/src/store-buffer.h
@@ -195,7 +195,7 @@ class StoreBuffer {
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
void VerifyPointers(LargeObjectSpace* space);
#endif
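
The store-buffer changes belong to a sweep visible throughout this diff: heap verification moves off #ifdef DEBUG/ASSERT and onto a dedicated VERIFY_HEAP define with CHECKs, so it can be compiled into otherwise optimized builds. A condensed sketch of the gating, with a simplified CHECK macro:

    #include <cstdio>
    #include <cstdlib>

    // Unlike ASSERT, CHECK stays active in any build that compiles it in.
    #define CHECK(condition)                                      \
      do {                                                        \
        if (!(condition)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #condition); \
          std::abort();                                           \
        }                                                         \
      } while (false)

    struct Heap {
      bool invariant_holds;
    #ifdef VERIFY_HEAP
      // Compiled in only with VERIFY_HEAP; guarded at runtime by a flag
      // such as --verify-heap, as in StoreBuffer::GCEpilogue above.
      void Verify() { CHECK(invariant_holds); }
    #endif
    };
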
diff --git a/src/3rdparty/v8/src/string-stream.cc b/src/3rdparty/v8/src/string-stream.cc
index 35f7be5..cffd7b0 100644
--- a/src/3rdparty/v8/src/string-stream.cc
+++ b/src/3rdparty/v8/src/string-stream.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -274,10 +274,10 @@ void StringStream::OutputToFile(FILE* out) {
for (unsigned next; (next = position + 2048) < length_; position = next) {
char save = buffer_[next];
buffer_[next] = '\0';
- internal::PrintF(out, "%s", &buffer_[position]);
+ internal::FPrintF(out, "%s", &buffer_[position]);
buffer_[next] = save;
}
- internal::PrintF(out, "%s", &buffer_[position]);
+ internal::FPrintF(out, "%s", &buffer_[position]);
}
@@ -291,7 +291,7 @@ void StringStream::ClearMentionedObjectCache() {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorage>(0));
+ new List<HeapObject*, PreallocatedStorageAllocationPolicy>(0));
}
isolate->string_stream_debug_object_cache()->Clear();
}
@@ -348,9 +348,12 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
Add("<Invalid map>\n");
return;
}
+ int real_size = map->NumberOfOwnDescriptors();
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetType(i) == FIELD) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (details.descriptor_index() > real_size) continue;
+ if (details.type() == FIELD) {
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
int len = 3;
@@ -427,7 +430,7 @@ void StringStream::PrintMentionedObjectCache() {
PrintUsingMap(JSObject::cast(printee));
if (printee->IsJSArray()) {
JSArray* array = JSArray::cast(printee);
- if (array->HasFastElements()) {
+ if (array->HasFastObjectElements()) {
unsigned int limit = FixedArray::cast(array->elements())->length();
unsigned int length =
static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
@@ -469,7 +472,7 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
Add("(Function context is outside heap)\n");
return;
}
- Object* token = context->global_context()->security_token();
+ Object* token = context->native_context()->security_token();
if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
isolate->set_string_stream_current_security_token(token);
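
OutputToFile (now routed through FPrintF) prints a large buffer through %s without copying, by planting a temporary NUL every 2048 bytes. The trick in isolation; like the original it assumes the buffer is NUL-terminated at the end:

    #include <cstdio>

    void OutputChunked(FILE* out, char* buffer, unsigned length) {
      unsigned position = 0;
      for (unsigned next; (next = position + 2048) < length; position = next) {
        char save = buffer[next];      // remember the byte we clobber
        buffer[next] = '\0';
        std::fprintf(out, "%s", &buffer[position]);
        buffer[next] = save;           // restore it
      }
      std::fprintf(out, "%s", &buffer[position]);  // final, NUL-terminated tail
    }
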
diff --git a/src/3rdparty/v8/src/stub-cache.cc b/src/3rdparty/v8/src/stub-cache.cc
index af41231..3796d2d 100644
--- a/src/3rdparty/v8/src/stub-cache.cc
+++ b/src/3rdparty/v8/src/stub-cache.cc
@@ -43,7 +43,8 @@ namespace internal {
// StubCache implementation.
-StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
+StubCache::StubCache(Isolate* isolate, Zone* zone)
+ : isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
}
@@ -118,7 +119,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -137,7 +138,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
Handle<JSObject> holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -157,7 +158,8 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
Handle<AccessorInfo> callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -171,13 +173,33 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
}
+Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> code =
+ compiler.CompileLoadViaGetter(name, receiver, holder, getter);
+ PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
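
Most of the stub-cache churn above and below is one mechanical migration: the stub type enumerators (NORMAL, FIELD, CALLBACKS, ...) leave the global PropertyType enum and become Code::StubType, so every flag computation gains a Code:: qualifier. A toy before/after of that scoping change; the flag packing here is illustrative only:

    class Code {
     public:
      // Previously these names lived in the global PropertyType enum and
      // collided with property kinds; scoping them makes call sites
      // self-describing.
      enum StubType { NORMAL, FIELD, CONSTANT_FUNCTION, CALLBACKS,
                      INTERCEPTOR, MAP_TRANSITION, NONEXISTENT };
      enum Kind { LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC };
      typedef unsigned Flags;
      static Flags ComputeMonomorphicFlags(Kind kind, StubType type) {
        return static_cast<Flags>((kind << 4) | type);  // toy packing
      }
    };

    // Old call site:  ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
    // New call site:
    //   Code::Flags flags =
    //       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
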
@@ -195,7 +217,8 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -220,7 +243,8 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
Handle<JSGlobalPropertyCell> cell,
bool is_dont_delete) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -239,7 +263,8 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<JSObject> holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -258,8 +283,8 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> holder,
Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
+ Code::CONSTANT_FUNCTION);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -278,7 +303,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> holder) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -298,7 +323,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<AccessorInfo> callback) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -315,7 +340,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
Handle<JSArray> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -331,7 +356,7 @@ Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
Handle<String> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Map> map(receiver->map());
Handle<Object> probe(map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -349,7 +374,7 @@ Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
Handle<String> name,
Handle<JSFunction> receiver) {
Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
+ Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -367,7 +392,8 @@ Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+ Code::StubType type =
+ (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
@@ -384,7 +410,7 @@ Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
- Handle<JSObject> receiver,
+ Handle<Map> receiver_map,
KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode) {
KeyedAccessGrowMode grow_mode =
@@ -395,7 +421,7 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
Code::ComputeMonomorphicFlags(
stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
: Code::KEYED_STORE_IC,
- NORMAL,
+ Code::NORMAL,
extra_state);
Handle<String> name;
switch (stub_kind) {
@@ -412,7 +438,6 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
UNREACHABLE();
break;
}
- Handle<Map> receiver_map(receiver->map());
Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -447,7 +472,7 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
} else {
PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
}
- JSObject::UpdateMapCodeCache(receiver, name, code);
+ Map::UpdateCodeCache(receiver_map, name, code);
return code;
}
@@ -464,7 +489,7 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
Handle<JSGlobalPropertyCell> cell,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, NORMAL, strict_mode);
+ Code::STORE_IC, Code::NORMAL, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -479,16 +504,38 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<AccessorInfo> callback,
StrictModeFlag strict_mode) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, CALLBACKS, strict_mode);
+ Code::STORE_IC, Code::CALLBACKS, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
+ Handle<Code> code =
+ compiler.CompileStoreCallback(name, receiver, holder, callback);
+ PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
+Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter,
+ StrictModeFlag strict_mode) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, Code::CALLBACKS, strict_mode);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code =
+ compiler.CompileStoreViaSetter(name, receiver, holder, setter);
PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
@@ -500,7 +547,7 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, INTERCEPTOR, strict_mode);
+ Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -517,7 +564,8 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode) {
- PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+ Code::StubType type =
+ (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, type, strict_mode);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
@@ -560,7 +608,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -598,7 +646,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -635,7 +683,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -665,7 +713,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
IC::GetCodeCacheForObject(*receiver, *holder);
Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
+ Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
cache_holder, argc);
Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -698,11 +746,9 @@ Code* StubCache::FindCallInitialize(int argc,
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
-
- // Use raw_unchecked... so we don't get assert failures during GC.
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
UnseededNumberDictionary* dictionary =
- isolate()->heap()->raw_unchecked_non_monomorphic_cache();
+ isolate()->heap()->non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
ASSERT(entry != -1);
Object* code = dictionary->ValueAt(entry);
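
For reference, FindCallInitialize and the Compute* helpers in the following hunks all share one lookup shape: the IC configuration (kind, state, stub type, argument count) is packed into Code::Flags, and those flags are the sole key into the non-monomorphic cache; the hunk above additionally drops the raw_unchecked accessor in favor of the checked one. A reduced sketch of that keyed lookup, with the dictionary modeled as a std::map (illustrative stand-ins, not V8's UnseededNumberDictionary):

    #include <cstddef>
    #include <map>
    #include <stdint.h>

    typedef uint32_t Flags;       // stand-in for Code::Flags
    typedef const void* CodePtr;  // stand-in for Code*

    // Reduced shape of the cache probe: the flags encode the whole IC
    // configuration, so equality on flags is the entire lookup.
    static CodePtr FindInNonMonomorphicCache(
        const std::map<Flags, CodePtr>& cache, Flags flags) {
      std::map<Flags, CodePtr>::const_iterator it = cache.find(flags);
      return it == cache.end() ? NULL : it->second;
    }
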
@@ -719,7 +765,7 @@ Handle<Code> StubCache::ComputeCallInitialize(int argc,
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
- Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -748,7 +794,7 @@ Handle<Code> StubCache::ComputeCallPreMonomorphic(
Code::Kind kind,
Code::ExtraICState extra_state) {
Code::Flags flags =
- Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -766,7 +812,7 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
Code::ExtraICState extra_state,
bool has_qml_global_receiver) {
Code::Flags flags =
- Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
+ Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -783,7 +829,7 @@ Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
ASSERT(kind == Code::KEYED_CALL_IC);
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -802,7 +848,7 @@ Handle<Code> StubCache::ComputeCallMegamorphic(
Code::ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -822,7 +868,7 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
 // and monomorphic stubs are not mixed up in the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
- NORMAL, argc, OWN_MAP);
+ Code::NORMAL, argc, OWN_MAP);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -842,7 +888,7 @@ Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
// the actual call ic to carry out the work.
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -861,7 +907,7 @@ Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
// the actual call ic to carry out the work.
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
- NORMAL, argc);
+ Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -891,7 +937,8 @@ void StubCache::Clear() {
void StubCache::CollectMatchingMaps(SmallMapList* types,
String* name,
Code::Flags flags,
- Handle<Context> global_context) {
+ Handle<Context> native_context,
+ Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
if (primary_[i].key == name) {
Map* map = primary_[i].value->FindFirstMap();
@@ -901,8 +948,8 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
int offset = PrimaryOffset(name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map));
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->Add(Handle<Map>(map), zone);
}
}
}
@@ -925,8 +972,8 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
// Lookup in secondary table and add matches.
int offset = SecondaryOffset(name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
- types->Add(Handle<Map>(map));
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->Add(Handle<Map>(map), zone);
}
}
}
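
The loops above walk both stub cache tables the same way the runtime probe does: recompute the entry's offset and accept the map only if the entry still owns that slot. A simplified sketch of the underlying two-level probe (the offset values here stand in for V8's PrimaryOffset/SecondaryOffset hashing):

    #include <cstddef>

    struct Entry { const void* key; const void* value; };

    // Check the primary slot first; on a miss, check the secondary slot
    // derived from the primary offset. A miss in both falls through to
    // the runtime.
    static const Entry* Probe(const Entry* primary, size_t primary_offset,
                              const Entry* secondary, size_t secondary_offset,
                              const void* name) {
      if (primary[primary_offset].key == name) return &primary[primary_offset];
      if (secondary[secondary_offset].key == name)
        return &secondary[secondary_offset];
      return NULL;
    }
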
@@ -945,6 +992,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
+ ASSERT(callback->IsCompatibleReceiver(args[0]));
v8::AccessorInfo info(&args[0]);
HandleScope scope(isolate);
v8::Handle<v8::Value> result;
@@ -956,7 +1004,9 @@ RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (result.IsEmpty()) return HEAP->undefined_value();
- return *v8::Utils::OpenHandle(*result);
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ return *result_internal;
}
@@ -966,6 +1016,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
ASSERT(fun != NULL);
+ ASSERT(callback->IsCompatibleReceiver(recv));
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope(isolate);
@@ -1020,6 +1071,8 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
-      return *v8::Utils::OpenHandle(*r);
+      return *result;
}
}
@@ -1076,7 +1129,9 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (!r.IsEmpty()) {
*attrs = NONE;
- return *v8::Utils::OpenHandle(*r);
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
+ return *result;
}
}
@@ -1315,16 +1370,14 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
Handle<String> name,
LookupResult* lookup) {
holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsProperty()) return;
-
- lookup->NotFound();
+ if (lookup->IsFound()) return;
if (holder->GetPrototype()->IsNull()) return;
-
holder->GetPrototype()->Lookup(*name, lookup);
}
-Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+Handle<Code> LoadStubCompiler::GetCode(Code::StubType type,
+ Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
@@ -1333,7 +1386,7 @@ Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
}
-Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
+Handle<Code> KeyedLoadStubCompiler::GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state) {
Code::Flags flags = Code::ComputeFlags(
@@ -1345,7 +1398,7 @@ Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
}
-Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
+Handle<Code> StoreStubCompiler::GetCode(Code::StubType type,
Handle<String> name) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
@@ -1356,7 +1409,7 @@ Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
}
-Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
+Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state) {
Code::ExtraICState extra_state =
@@ -1424,6 +1477,7 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
#undef CALL_GENERATOR_CASE
}
CallOptimization optimization(function);
+#ifndef _WIN32_WCE
ASSERT(optimization.is_simple_api_call());
return CompileFastApiCall(optimization,
object,
@@ -1431,10 +1485,15 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
cell,
function,
fname);
+#else
+  // Disable the fast API call optimization for Windows CE: the calling convention differs.
+ return Handle<Code>::null();
+#endif // _WIN32_WCE
}
-Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
+ Handle<String> name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
@@ -1450,7 +1509,7 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
if (function->shared()->name()->IsString()) {
function_name = Handle<String>(String::cast(function->shared()->name()));
}
- return GetCode(CONSTANT_FUNCTION, function_name);
+ return GetCode(Code::CONSTANT_FUNCTION, function_name);
}
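
The mechanical edits throughout stub-cache.cc replace bare PropertyType enumerators with Code::StubType ones, so every call site now spells out the scope. The shape of the enum implied by the call sites above (member order here is illustrative):

    class Code {
     public:
      // The stub type now nests inside Code, so callers write
      // Code::CALLBACKS, Code::FIELD, etc. instead of the bare
      // PropertyType names.
      enum StubType {
        NORMAL,
        FIELD,
        CONSTANT_FUNCTION,
        CALLBACKS,
        INTERCEPTOR,
        MAP_TRANSITION
      };
    };
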
diff --git a/src/3rdparty/v8/src/stub-cache.h b/src/3rdparty/v8/src/stub-cache.h
index 206dddd..ec9274b 100644
--- a/src/3rdparty/v8/src/stub-cache.h
+++ b/src/3rdparty/v8/src/stub-cache.h
@@ -90,6 +90,11 @@ class StubCache {
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
+ Handle<Code> ComputeLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter);
+
Handle<Code> ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
@@ -154,9 +159,16 @@ class StubCache {
Handle<Code> ComputeStoreCallback(Handle<String> name,
Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Handle<AccessorInfo> callback,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter,
+ StrictModeFlag strict_mode);
+
Handle<Code> ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode);
@@ -169,7 +181,7 @@ class StubCache {
Handle<Map> transition,
StrictModeFlag strict_mode);
- Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
+ Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<Map> receiver_map,
KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode);
@@ -251,7 +263,8 @@ class StubCache {
void CollectMatchingMaps(SmallMapList* types,
String* name,
Code::Flags flags,
- Handle<Context> global_context);
+ Handle<Context> native_context,
+ Zone* zone);
// Generate code for probing the stub cache table.
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
@@ -303,7 +316,7 @@ class StubCache {
Factory* factory() { return isolate()->factory(); }
private:
- explicit StubCache(Isolate* isolate);
+ StubCache(Isolate* isolate, Zone* zone);
Handle<Code> ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
@@ -461,14 +474,16 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
- static void GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label);
+ void GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ int index,
+ Handle<Map> transition,
+ Handle<String> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
static void GenerateLoadMiss(MacroAssembler* masm,
Code::Kind kind);
@@ -512,6 +527,7 @@ class StubCompiler BASE_EMBEDDED {
int save_at_depth,
Label* miss);
+
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
@@ -536,10 +552,20 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss);
+ void GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss);
+
void GenerateLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -594,6 +620,14 @@ class LoadStubCompiler: public StubCompiler {
Handle<JSObject> holder,
Handle<AccessorInfo> callback);
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter);
+
+ Handle<Code> CompileLoadViaGetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter);
+
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
@@ -610,7 +644,7 @@ class LoadStubCompiler: public StubCompiler {
bool is_dont_delete);
private:
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
};
@@ -658,7 +692,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
- Handle<Code> GetCode(PropertyType type,
+ Handle<Code> GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state = MONOMORPHIC);
};
@@ -675,9 +709,18 @@ class StoreStubCompiler: public StubCompiler {
Handle<Map> transition,
Handle<String> name);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name);
+ Handle<Code> CompileStoreCallback(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback);
+
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreViaSetter(Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter);
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
Handle<String> name);
@@ -687,7 +730,7 @@ class StoreStubCompiler: public StubCompiler {
Handle<String> name);
private:
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
StrictModeFlag strict_mode_;
};
@@ -728,7 +771,7 @@ class KeyedStoreStubCompiler: public StubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
private:
- Handle<Code> GetCode(PropertyType type,
+ Handle<Code> GetCode(Code::StubType type,
Handle<String> name,
InlineCacheState state = MONOMORPHIC);
@@ -808,7 +851,7 @@ class CallStubCompiler: public StubCompiler {
Handle<JSFunction> function,
Handle<String> name);
- Handle<Code> GetCode(PropertyType type, Handle<String> name);
+ Handle<Code> GetCode(Code::StubType type, Handle<String> name);
Handle<Code> GetCode(Handle<JSFunction> function);
const ParameterCount& arguments() { return arguments_; }
diff --git a/src/3rdparty/v8/src/token.h b/src/3rdparty/v8/src/token.h
index 3036e55..863ba62 100644
--- a/src/3rdparty/v8/src/token.h
+++ b/src/3rdparty/v8/src/token.h
@@ -99,6 +99,7 @@ namespace internal {
T(SHL, "<<", 11) \
T(SAR, ">>", 11) \
T(SHR, ">>>", 11) \
+ T(ROR, "rotate right", 11) /* only used by Crankshaft */ \
T(ADD, "+", 12) \
T(SUB, "-", 12) \
T(MUL, "*", 13) \
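
As the comment notes, ROR has no source-level operator; it is synthesized internally when Crankshaft recognizes a rotate idiom and folds it into one instruction. A sketch of the classic pattern, written here in C++ (the JavaScript form uses >>> for the logical shift):

    #include <stdint.h>

    // The rotate idiom: bits shifted out on the right re-enter on the left.
    static inline uint32_t RotateRight(uint32_t x, uint32_t n) {
      n &= 31;  // keep the shift amount in range
      return (x >> n) | (x << ((32 - n) & 31));
    }
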
diff --git a/src/3rdparty/v8/src/transitions-inl.h b/src/3rdparty/v8/src/transitions-inl.h
new file mode 100644
index 0000000..cfaa99d
--- /dev/null
+++ b/src/3rdparty/v8/src/transitions-inl.h
@@ -0,0 +1,220 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRANSITIONS_INL_H_
+#define V8_TRANSITIONS_INL_H_
+
+#include "objects-inl.h"
+#include "transitions.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+#define WRITE_FIELD(p, offset, value) \
+ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ } \
+ }
+
+
+TransitionArray* TransitionArray::cast(Object* object) {
+ ASSERT(object->IsTransitionArray());
+ return reinterpret_cast<TransitionArray*>(object);
+}
+
+
+Map* TransitionArray::elements_transition() {
+ Object* transition_map = get(kElementsTransitionIndex);
+ return Map::cast(transition_map);
+}
+
+
+void TransitionArray::ClearElementsTransition() {
+ WRITE_FIELD(this, kElementsTransitionOffset, Smi::FromInt(0));
+}
+
+
+bool TransitionArray::HasElementsTransition() {
+ return IsFullTransitionArray() &&
+ get(kElementsTransitionIndex) != Smi::FromInt(0);
+}
+
+
+void TransitionArray::set_elements_transition(Map* transition_map,
+ WriteBarrierMode mode) {
+ ASSERT(IsFullTransitionArray());
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kElementsTransitionOffset, transition_map);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kElementsTransitionOffset, transition_map, mode);
+}
+
+
+Object* TransitionArray::back_pointer_storage() {
+ return get(kBackPointerStorageIndex);
+}
+
+
+void TransitionArray::set_back_pointer_storage(Object* back_pointer,
+ WriteBarrierMode mode) {
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kBackPointerStorageOffset, back_pointer);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kBackPointerStorageOffset, back_pointer, mode);
+}
+
+
+bool TransitionArray::HasPrototypeTransitions() {
+ return IsFullTransitionArray() &&
+ get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
+}
+
+
+FixedArray* TransitionArray::GetPrototypeTransitions() {
+ ASSERT(IsFullTransitionArray());
+ Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+ return FixedArray::cast(prototype_transitions);
+}
+
+
+HeapObject* TransitionArray::UncheckedPrototypeTransitions() {
+ ASSERT(HasPrototypeTransitions());
+ return reinterpret_cast<HeapObject*>(get(kPrototypeTransitionsIndex));
+}
+
+
+void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
+ WriteBarrierMode mode) {
+ ASSERT(IsFullTransitionArray());
+ ASSERT(transitions->IsFixedArray());
+ Heap* heap = GetHeap();
+ WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
+ CONDITIONAL_WRITE_BARRIER(
+ heap, this, kPrototypeTransitionsOffset, transitions, mode);
+}
+
+
+Object** TransitionArray::GetPrototypeTransitionsSlot() {
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kPrototypeTransitionsOffset);
+}
+
+
+Object** TransitionArray::GetKeySlot(int transition_number) {
+ ASSERT(!IsSimpleTransition());
+ ASSERT(transition_number < number_of_transitions());
+ return HeapObject::RawField(
+ reinterpret_cast<HeapObject*>(this),
+ OffsetOfElementAt(ToKeyIndex(transition_number)));
+}
+
+
+String* TransitionArray::GetKey(int transition_number) {
+ if (IsSimpleTransition()) {
+ Map* target = GetTarget(kSimpleTransitionIndex);
+ int descriptor = target->LastAdded();
+ String* key = target->instance_descriptors()->GetKey(descriptor);
+ return key;
+ }
+ ASSERT(transition_number < number_of_transitions());
+ return String::cast(get(ToKeyIndex(transition_number)));
+}
+
+
+void TransitionArray::SetKey(int transition_number, String* key) {
+ ASSERT(!IsSimpleTransition());
+ ASSERT(transition_number < number_of_transitions());
+ set(ToKeyIndex(transition_number), key);
+}
+
+
+Map* TransitionArray::GetTarget(int transition_number) {
+ if (IsSimpleTransition()) {
+ ASSERT(transition_number == kSimpleTransitionIndex);
+ return Map::cast(get(kSimpleTransitionTarget));
+ }
+ ASSERT(transition_number < number_of_transitions());
+ return Map::cast(get(ToTargetIndex(transition_number)));
+}
+
+
+void TransitionArray::SetTarget(int transition_number, Map* value) {
+ if (IsSimpleTransition()) {
+ ASSERT(transition_number == kSimpleTransitionIndex);
+ return set(kSimpleTransitionTarget, value);
+ }
+ ASSERT(transition_number < number_of_transitions());
+ set(ToTargetIndex(transition_number), value);
+}
+
+
+PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
+ Map* map = GetTarget(transition_number);
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int descriptor = map->LastAdded();
+ return descriptors->GetDetails(descriptor);
+}
+
+
+int TransitionArray::Search(String* name) {
+ if (IsSimpleTransition()) {
+ String* key = GetKey(kSimpleTransitionIndex);
+ if (key->Equals(name)) return kSimpleTransitionIndex;
+ return kNotFound;
+ }
+ return internal::Search<ALL_ENTRIES>(this, name);
+}
+
+
+void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
+ String* key,
+ Map* target) {
+ FixedArray::NoIncrementalWriteBarrierSet(
+ this, ToKeyIndex(transition_number), key);
+ FixedArray::NoIncrementalWriteBarrierSet(
+ this, ToTargetIndex(transition_number), target);
+}
+
+
+#undef FIELD_ADDR
+#undef WRITE_FIELD
+#undef CONDITIONAL_WRITE_BARRIER
+
+
+} } // namespace v8::internal
+
+#endif // V8_TRANSITIONS_INL_H_
diff --git a/src/3rdparty/v8/src/transitions.cc b/src/3rdparty/v8/src/transitions.cc
new file mode 100644
index 0000000..56b6caf
--- /dev/null
+++ b/src/3rdparty/v8/src/transitions.cc
@@ -0,0 +1,160 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "objects.h"
+#include "transitions-inl.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+static MaybeObject* AllocateRaw(int length) {
+ Heap* heap = Isolate::Current()->heap();
+
+  // Allocate as a plain FixedArray to avoid calling TransitionArray::cast
+  // on an incomplete object.
+ FixedArray* array;
+ MaybeObject* maybe_array = heap->AllocateFixedArray(length);
+ if (!maybe_array->To(&array)) return maybe_array;
+ return array;
+}
+
+
+MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
+ FixedArray* array;
+ MaybeObject* maybe_array = AllocateRaw(ToKeyIndex(number_of_transitions));
+ if (!maybe_array->To(&array)) return maybe_array;
+ array->set(kElementsTransitionIndex, Smi::FromInt(0));
+ array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+ return array;
+}
+
+
+void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition) {
+ NoIncrementalWriteBarrierSet(target_transition,
+ origin->GetKey(origin_transition),
+ origin->GetTarget(origin_transition));
+}
+
+
+static bool InsertionPointFound(String* key1, String* key2) {
+ return key1->Hash() > key2->Hash();
+}
+
+
+MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
+ String* key,
+ Map* target,
+ Object* back_pointer) {
+ TransitionArray* result;
+ MaybeObject* maybe_result;
+
+ if (flag == SIMPLE_TRANSITION) {
+ maybe_result = AllocateRaw(kSimpleTransitionSize);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->set(kSimpleTransitionTarget, target);
+ } else {
+ maybe_result = Allocate(1);
+ if (!maybe_result->To(&result)) return maybe_result;
+ result->NoIncrementalWriteBarrierSet(0, key, target);
+ }
+ result->set_back_pointer_storage(back_pointer);
+ return result;
+}
+
+
+MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
+ ASSERT(!IsFullTransitionArray());
+ int nof = number_of_transitions();
+ TransitionArray* result;
+ MaybeObject* maybe_result = Allocate(nof);
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ if (nof == 1) {
+ result->NoIncrementalWriteBarrierCopyFrom(this, kSimpleTransitionIndex, 0);
+ }
+
+ result->set_back_pointer_storage(back_pointer_storage());
+ return result;
+}
+
+
+MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) {
+ TransitionArray* result;
+
+ int number_of_transitions = this->number_of_transitions();
+ int new_size = number_of_transitions;
+
+ int insertion_index = this->Search(name);
+ if (insertion_index == kNotFound) ++new_size;
+
+ MaybeObject* maybe_array;
+ maybe_array = TransitionArray::Allocate(new_size);
+ if (!maybe_array->To(&result)) return maybe_array;
+
+ if (HasElementsTransition()) {
+ result->set_elements_transition(elements_transition());
+ }
+
+ if (HasPrototypeTransitions()) {
+ result->SetPrototypeTransitions(GetPrototypeTransitions());
+ }
+
+ if (insertion_index != kNotFound) {
+ for (int i = 0; i < number_of_transitions; ++i) {
+ if (i != insertion_index) {
+ result->NoIncrementalWriteBarrierCopyFrom(this, i, i);
+ }
+ }
+ result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ return result;
+ }
+
+ insertion_index = 0;
+ for (; insertion_index < number_of_transitions; ++insertion_index) {
+ if (InsertionPointFound(GetKey(insertion_index), name)) break;
+ result->NoIncrementalWriteBarrierCopyFrom(
+ this, insertion_index, insertion_index);
+ }
+
+ result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+
+ for (; insertion_index < number_of_transitions; ++insertion_index) {
+ result->NoIncrementalWriteBarrierCopyFrom(
+ this, insertion_index, insertion_index + 1);
+ }
+
+ result->set_back_pointer_storage(back_pointer_storage());
+ return result;
+}
+
+
+} } // namespace v8::internal
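
CopyInsert above keeps entries ordered by the key's string hash (InsertionPointFound compares hashes), which is what allows hash-based search over full transition arrays. A self-contained sketch of the same copy-around-the-insertion-point walk, over plain integer hashes:

    #include <cstddef>
    #include <vector>

    // Copy entries whose hash does not exceed the new key's hash, place the
    // new entry, then copy the rest shifted one slot to the right.
    static std::vector<int> CopyInsertByHash(const std::vector<int>& hashes,
                                             int new_hash) {
      std::vector<int> result;
      result.reserve(hashes.size() + 1);
      size_t i = 0;
      while (i < hashes.size() && hashes[i] <= new_hash)
        result.push_back(hashes[i++]);
      result.push_back(new_hash);  // insertion point found
      while (i < hashes.size()) result.push_back(hashes[i++]);
      return result;
    }
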
diff --git a/src/3rdparty/v8/src/transitions.h b/src/3rdparty/v8/src/transitions.h
new file mode 100644
index 0000000..0a66026
--- /dev/null
+++ b/src/3rdparty/v8/src/transitions.h
@@ -0,0 +1,207 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRANSITIONS_H_
+#define V8_TRANSITIONS_H_
+
+#include "elements-kind.h"
+#include "heap.h"
+#include "isolate.h"
+#include "objects.h"
+#include "v8checks.h"
+
+namespace v8 {
+namespace internal {
+
+
+// TransitionArrays are fixed arrays used to hold map transitions for property,
+// constant, and element changes. They can either be simple transition arrays
+// that store a single property transition, or a full transition array that has
+// space for elements transitions, prototype transitions and multiple property
+// transitions. The details related to property transitions are accessed in the
+// descriptor array of the target map. In the case of a simple transition, the
+// key is also read from the descriptor array of the target map.
+//
+// The simple format of these objects is:
+// [0] Undefined or back pointer map
+// [1] Single transition
+//
+// The full format is:
+// [0] Undefined or back pointer map
+// [1] Smi(0) or elements transition map
+// [2] Smi(0) or fixed array of prototype transitions
+// [3] First transition
+// [length() - kTransitionSize] Last transition
+class TransitionArray: public FixedArray {
+ public:
+ // Accessors for fetching instance transition at transition number.
+ inline String* GetKey(int transition_number);
+ inline void SetKey(int transition_number, String* value);
+ inline Object** GetKeySlot(int transition_number);
+ int GetSortedKeyIndex(int transition_number) { return transition_number; }
+
+ String* GetSortedKey(int transition_number) {
+ return GetKey(transition_number);
+ }
+
+ inline Map* GetTarget(int transition_number);
+ inline void SetTarget(int transition_number, Map* target);
+
+ inline PropertyDetails GetTargetDetails(int transition_number);
+
+ inline Map* elements_transition();
+ inline void set_elements_transition(
+ Map* target,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline bool HasElementsTransition();
+ inline void ClearElementsTransition();
+
+ inline Object* back_pointer_storage();
+ inline void set_back_pointer_storage(
+ Object* back_pointer,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ inline FixedArray* GetPrototypeTransitions();
+ inline void SetPrototypeTransitions(
+ FixedArray* prototype_transitions,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline Object** GetPrototypeTransitionsSlot();
+ inline bool HasPrototypeTransitions();
+ inline HeapObject* UncheckedPrototypeTransitions();
+
+ // Returns the number of transitions in the array.
+ int number_of_transitions() {
+ if (IsSimpleTransition()) return 1;
+ int len = length();
+ return len <= kFirstIndex ? 0 : (len - kFirstIndex) / kTransitionSize;
+ }
+
+ inline int number_of_entries() { return number_of_transitions(); }
+
+ // Allocate a new transition array with a single entry.
+ static MUST_USE_RESULT MaybeObject* NewWith(
+ SimpleTransitionFlag flag,
+ String* key,
+ Map* target,
+ Object* back_pointer);
+
+ MUST_USE_RESULT MaybeObject* ExtendToFullTransitionArray();
+
+ // Copy the transition array, inserting a new transition.
+ // TODO(verwaest): This should not cause an existing transition to be
+ // overwritten.
+ MUST_USE_RESULT MaybeObject* CopyInsert(String* name, Map* target);
+
+ // Copy a single transition from the origin array.
+ inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition);
+
+ // Search a transition for a given property name.
+ inline int Search(String* name);
+
+ // Allocates a TransitionArray.
+ MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
+
+ bool IsSimpleTransition() { return length() == kSimpleTransitionSize; }
+ bool IsFullTransitionArray() { return length() >= kFirstIndex; }
+
+ // Casting.
+ static inline TransitionArray* cast(Object* obj);
+
+ // Constant for denoting key was not found.
+ static const int kNotFound = -1;
+
+ static const int kBackPointerStorageIndex = 0;
+
+ // Layout for full transition arrays.
+ static const int kElementsTransitionIndex = 1;
+ static const int kPrototypeTransitionsIndex = 2;
+ static const int kFirstIndex = 3;
+
+ // Layout for simple transition arrays.
+ static const int kSimpleTransitionTarget = 1;
+ static const int kSimpleTransitionSize = 2;
+ static const int kSimpleTransitionIndex = 0;
+ STATIC_ASSERT(kSimpleTransitionIndex != kNotFound);
+
+ static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
+
+ // Layout for the full transition array header.
+ static const int kElementsTransitionOffset = kBackPointerStorageOffset +
+ kPointerSize;
+ static const int kPrototypeTransitionsOffset = kElementsTransitionOffset +
+ kPointerSize;
+
+ // Layout of map transition entries in full transition arrays.
+ static const int kTransitionKey = 0;
+ static const int kTransitionTarget = 1;
+ static const int kTransitionSize = 2;
+
+#ifdef OBJECT_PRINT
+ // Print all the transitions.
+ inline void PrintTransitions() {
+ PrintTransitions(stdout);
+ }
+ void PrintTransitions(FILE* out);
+#endif
+
+#ifdef DEBUG
+ bool IsSortedNoDuplicates(int valid_entries = -1);
+ bool IsConsistentWithBackPointers(Map* current_map);
+ bool IsEqualTo(TransitionArray* other);
+#endif
+
+ // The maximum number of transitions we want in a transition array (should
+ // fit in a page).
+ static const int kMaxNumberOfTransitions = 1024 + 512;
+
+ private:
+ // Conversion from transition number to array indices.
+ static int ToKeyIndex(int transition_number) {
+ return kFirstIndex +
+ (transition_number * kTransitionSize) +
+ kTransitionKey;
+ }
+
+ static int ToTargetIndex(int transition_number) {
+ return kFirstIndex +
+ (transition_number * kTransitionSize) +
+ kTransitionTarget;
+ }
+
+ inline void NoIncrementalWriteBarrierSet(int transition_number,
+ String* key,
+ Map* target);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_TRANSITIONS_H_
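
The header comment above fixes the array layout, and all of the class's index arithmetic derives from the same constants. Recomputing the length of a full transition array from them (this mirrors what ToKeyIndex and Allocate do):

    // Three header slots (back pointer, elements transition, prototype
    // transitions) plus two slots per transition entry.
    static int FullTransitionArrayLength(int number_of_transitions) {
      const int kFirstIndex = 3;
      const int kTransitionSize = 2;
      return kFirstIndex + number_of_transitions * kTransitionSize;
    }
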
diff --git a/src/3rdparty/v8/src/type-info.cc b/src/3rdparty/v8/src/type-info.cc
index 159be6a..bc6a46b 100644
--- a/src/3rdparty/v8/src/type-info.cc
+++ b/src/3rdparty/v8/src/type-info.cc
@@ -60,17 +60,24 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
- Isolate* isolate) {
- global_context_ = global_context;
+ Handle<Context> native_context,
+ Isolate* isolate,
+ Zone* zone) {
+ native_context_ = native_context;
isolate_ = isolate;
+ zone_ = zone;
BuildDictionary(code);
ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
}
-Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
- int entry = dictionary_->FindEntry(ast_id);
+static uint32_t IdToKey(TypeFeedbackId ast_id) {
+ return static_cast<uint32_t>(ast_id.ToInt());
+}
+
+
+Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
+ int entry = dictionary_->FindEntry(IdToKey(ast_id));
return entry != UnseededNumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
: Handle<Object>::cast(isolate_->factory()->undefined_value());
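
GetInfo now keys the dictionary through IdToKey rather than taking a raw unsigned, so callers must hold a typed id. A minimal stand-in for such a wrapper (the real TypeFeedbackId is defined elsewhere in V8; this sketch assumes only the ToInt accessor used above):

    #include <stdint.h>

    // A typed id: it cannot be mixed up with other integers and must be
    // unwrapped explicitly before use as a dictionary key.
    class FeedbackId {
     public:
      explicit FeedbackId(int id) : id_(id) {}
      int ToInt() const { return id_; }
     private:
      int id_;
    };

    static uint32_t IdToKey(FeedbackId id) {
      return static_cast<uint32_t>(id.ToInt());
    }
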
@@ -78,7 +85,7 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsMap()) return false;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -89,22 +96,23 @@ bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_keyed_load_stub() &&
+ bool preliminary_checks = code->is_keyed_load_stub() &&
code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
- code->FindFirstMap() != NULL &&
- !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+ Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
+ if (!preliminary_checks) return false;
+ Map* map = code->FindFirstMap();
+ return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Builtins* builtins = isolate_->builtins();
@@ -116,27 +124,29 @@ bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
}
-bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
bool allow_growth =
Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
ALLOW_JSARRAY_GROWTH;
- return code->is_keyed_store_stub() &&
+ bool preliminary_checks =
+ code->is_keyed_store_stub() &&
!allow_growth &&
code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
- code->FindFirstMap() != NULL &&
- !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+ Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
+ if (!preliminary_checks) return false;
+ Map* map = code->FindFirstMap();
+ return map != NULL && !CanRetainOtherContext(map, *native_context_);
}
return false;
}
-bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Builtins* builtins = isolate_->builtins();
@@ -154,26 +164,26 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallFeedbackId());
return value->IsMap() || value->IsSmi() || value->IsJSFunction();
}
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallNewFeedbackId());
return value->IsJSFunction();
}
bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
ObjectLiteral::Property* prop) {
- Handle<Object> map_or_code = GetInfo(prop->key()->id());
+ Handle<Object> map_or_code = GetInfo(prop->key()->LiteralFeedbackId());
return map_or_code->IsMap();
}
bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
- Handle<Object> value = GetInfo(stmt->PrepareId());
+ Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
return value->IsSmi() &&
Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
}
@@ -181,12 +191,12 @@ bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
@@ -194,14 +204,15 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
}
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
- ASSERT(StoreIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
+ TypeFeedbackId ast_id) {
+ ASSERT(StoreIsMonomorphicNormal(ast_id));
+ Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
@@ -212,16 +223,18 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
+ CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::STORE_IC, Code::NORMAL);
+ CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -237,16 +250,16 @@ void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- NORMAL,
+ Code::NORMAL,
extra_ic_state,
OWN_MAP,
arity);
- CollectReceiverTypes(expr->id(), name, flags, types);
+ CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
}
CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->id());
+ Handle<Object> value = GetInfo(expr->CallFeedbackId());
if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
ASSERT(check != RECEIVER_MAP_CHECK);
@@ -262,13 +275,13 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
UNREACHABLE();
break;
case STRING_CHECK:
- function = global_context_->string_function();
+ function = native_context_->string_function();
break;
case NUMBER_CHECK:
- function = global_context_->number_function();
+ function = native_context_->number_function();
break;
case BOOLEAN_CHECK:
- function = global_context_->boolean_function();
+ function = native_context_->boolean_function();
break;
}
ASSERT(function != NULL);
@@ -277,30 +290,30 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->id()));
+ return Handle<JSFunction>::cast(GetInfo(expr->CallFeedbackId()));
}
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->id()));
+ return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId()));
}
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
- return Handle<Map>::cast(GetInfo(prop->key()->id()));
+ return Handle<Map>::cast(GetInfo(prop->key()->LiteralFeedbackId()));
}
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->id()) ==
+ return *GetInfo(expr->PropertyFeedbackId()) ==
isolate_->builtins()->builtin(id);
}
TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -330,7 +343,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return false;
@@ -340,7 +353,7 @@ bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
@@ -350,14 +363,14 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
}
Map* first_map = code->FindFirstMap();
ASSERT(first_map != NULL);
- return CanRetainOtherContext(first_map, *global_context_)
+ return CanRetainOtherContext(first_map, *native_context_)
? Handle<Map>::null()
: Handle<Map>(first_map);
}
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->UnaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -376,7 +389,7 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
- Handle<Object> object = GetInfo(expr->id());
+ Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -460,7 +473,7 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
- Handle<Object> object = GetInfo(expr->CountId());
+ Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
@@ -488,7 +501,7 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
}
-void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
+void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types) {
@@ -501,34 +514,35 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
} else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object));
+ types->Add(Handle<Map>::cast(object), zone());
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
- types->Reserve(4);
+ types->Reserve(4, zone());
ASSERT(object->IsCode());
isolate_->stub_cache()->CollectMatchingMaps(types,
*name,
flags,
- global_context_);
+ native_context_,
+ zone());
}
}
-// Check if a map originates from a given global context. We use this
+// Check if a map originates from a given native context. We use this
 // information to filter out maps from a different context to avoid
// retaining objects from different tabs in Chrome via optimized code.
bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
- Context* global_context) {
+ Context* native_context) {
Object* constructor = NULL;
while (!map->prototype()->IsNull()) {
constructor = map->constructor();
if (!constructor->IsNull()) {
 // If the constructor is neither null nor a JSFunction, we have to
- // conservatively assume that it may retain a global context.
+ // conservatively assume that it may retain a native context.
if (!constructor->IsJSFunction()) return true;
// Check if the constructor directly references a foreign context.
if (CanRetainOtherContext(JSFunction::cast(constructor),
- global_context)) {
+ native_context)) {
return true;
}
}
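
The walk above is deliberately conservative: any non-JSFunction constructor on the prototype chain counts as possibly retaining a foreign context. The function-level test that the recursion bottoms out in reduces to two pointer comparisons, sketched here with opaque pointers standing in for the global objects:

    // A function may retain another native context unless its own context's
    // global object is this context's global object or its builtins object.
    static bool MayRetainOtherContext(const void* function_global,
                                      const void* context_global,
                                      const void* context_builtins) {
      return function_global != context_global &&
             function_global != context_builtins;
    }
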
@@ -537,26 +551,27 @@ bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
constructor = map->constructor();
if (constructor->IsNull()) return false;
JSFunction* function = JSFunction::cast(constructor);
- return CanRetainOtherContext(function, global_context);
+ return CanRetainOtherContext(function, native_context);
}
bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
- Context* global_context) {
- return function->context()->global() != global_context->global()
- && function->context()->global() != global_context->builtins();
+ Context* native_context) {
+ return function->context()->global_object() != native_context->global_object()
+ && function->context()->global_object() != native_context->builtins();
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
+ Zone* zone) {
for (int i = 0; i < list->length(); ++i) {
if (list->at(i).is_identical_to(map)) return;
}
- list->Add(map);
+ list->Add(map, zone);
}
-void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
+void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
if (!object->IsCode()) return;
@@ -570,8 +585,8 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
Object* object = info->target_object();
if (object->IsMap()) {
Map* map = Map::cast(object);
- if (!CanRetainOtherContext(map, *global_context_)) {
- AddMapIfMissing(Handle<Map>(map), types);
+ if (!CanRetainOtherContext(map, *native_context_)) {
+ AddMapIfMissing(Handle<Map>(map), types, zone());
}
}
}
@@ -579,7 +594,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
}
-byte TypeFeedbackOracle::ToBooleanTypes(unsigned ast_id) {
+byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
Handle<Object> object = GetInfo(ast_id);
return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
}
@@ -591,7 +606,7 @@ byte TypeFeedbackOracle::ToBooleanTypes(unsigned ast_id) {
// infos before we process them.
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
AssertNoAllocation no_allocation;
- ZoneList<RelocInfo> infos(16);
+ ZoneList<RelocInfo> infos(16, zone());
HandleScope scope;
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
@@ -606,7 +621,7 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- infos->Add(*it.rinfo());
+ infos->Add(*it.rinfo(), zone());
}
}
@@ -640,7 +655,8 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
for (int i = 0; i < infos->length(); i++) {
RelocInfo reloc_entry = (*infos)[i];
Address target_address = reloc_entry.target_address();
- unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
+ TypeFeedbackId ast_id =
+ TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
case Code::LOAD_IC:
@@ -656,7 +672,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
if (map == NULL) {
SetInfo(ast_id, static_cast<Object*>(target));
} else if (!CanRetainOtherContext(Map::cast(map),
- *global_context_)) {
+ *native_context_)) {
SetInfo(ast_id, map);
}
}
@@ -693,21 +709,22 @@ void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
Handle<TypeFeedbackCells> cache(
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
for (int i = 0; i < cache->CellCount(); i++) {
- unsigned ast_id = cache->AstId(i)->value();
+ TypeFeedbackId ast_id = cache->AstId(i);
Object* value = cache->Cell(i)->value();
if (value->IsSmi() ||
(value->IsJSFunction() &&
!CanRetainOtherContext(JSFunction::cast(value),
- *global_context_))) {
+ *native_context_))) {
SetInfo(ast_id, value);
}
}
}
-void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
- ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
- MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
+void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
+ ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
+ UnseededNumberDictionary::kNotFound);
+ MaybeObject* maybe_result = dictionary_->AtNumberPut(IdToKey(ast_id), target);
USE(maybe_result);
#ifdef DEBUG
Object* result = NULL;
diff --git a/src/3rdparty/v8/src/type-info.h b/src/3rdparty/v8/src/type-info.h
index d461331..00d88c2 100644
--- a/src/3rdparty/v8/src/type-info.h
+++ b/src/3rdparty/v8/src/type-info.h
@@ -232,17 +232,18 @@ class UnaryOperation;
class ForInStatement;
-class TypeFeedbackOracle BASE_EMBEDDED {
+class TypeFeedbackOracle: public ZoneObject {
public:
TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
- Isolate* isolate);
+ Handle<Context> native_context,
+ Isolate* isolate,
+ Zone* zone);
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
bool LoadIsMegamorphicWithTypeInfo(Property* expr);
- bool StoreIsMonomorphicNormal(Expression* expr);
- bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
+ bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
+ bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
@@ -250,7 +251,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
bool IsForInFastCase(ForInStatement* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
+ Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id);
void LoadReceiverTypes(Property* expr,
Handle<String> name,
@@ -262,12 +263,12 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Handle<String> name,
CallKind call_kind,
SmallMapList* types);
- void CollectKeyedReceiverTypes(unsigned ast_id,
+ void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types);
- static bool CanRetainOtherContext(Map* map, Context* global_context);
+ static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
- Context* global_context);
+ Context* native_context);
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
@@ -282,7 +283,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
- byte ToBooleanTypes(unsigned ast_id);
+ byte ToBooleanTypes(TypeFeedbackId ast_id);
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
@@ -293,13 +294,15 @@ class TypeFeedbackOracle BASE_EMBEDDED {
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
+ Zone* zone() const { return zone_; }
+
private:
- void CollectReceiverTypes(unsigned ast_id,
+ void CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types);
- void SetInfo(unsigned ast_id, Object* target);
+ void SetInfo(TypeFeedbackId ast_id, Object* target);
void BuildDictionary(Handle<Code> code);
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
@@ -312,11 +315,12 @@ class TypeFeedbackOracle BASE_EMBEDDED {
// Returns an element from the backing store. Returns undefined if
// there is no information.
- Handle<Object> GetInfo(unsigned ast_id);
+ Handle<Object> GetInfo(TypeFeedbackId ast_id);
- Handle<Context> global_context_;
+ Handle<Context> native_context_;
Isolate* isolate_;
Handle<UnseededNumberDictionary> dictionary_;
+ Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
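The hunk above turns TypeFeedbackOracle into a zone-allocated object and threads a Zone* through it. A minimal, self-contained sketch of the zone idiom, assuming nothing about V8's actual declarations (Zone, ZoneObject and Oracle below are illustrative stand-ins):

    #include <cstddef>
    #include <vector>

    class Zone {
     public:
      ~Zone() {
        // Everything is freed at once; per-object destructors never run,
        // which is why zone objects must not own non-zone resources.
        for (size_t i = 0; i < chunks_.size(); ++i) ::operator delete(chunks_[i]);
      }
      void* Allocate(size_t size) {
        void* p = ::operator new(size);
        chunks_.push_back(p);
        return p;
      }
     private:
      std::vector<void*> chunks_;
    };

    class ZoneObject {
     public:
      // Placement-style allocation: the object lives until the zone dies.
      void* operator new(size_t size, Zone* zone) { return zone->Allocate(size); }
    };

    class Oracle : public ZoneObject {
     public:
      explicit Oracle(Zone* zone) : zone_(zone) {}
      Zone* zone() const { return zone_; }
     private:
      Zone* zone_;
    };

    int main() {
      Zone zone;
      Oracle* oracle = new (&zone) Oracle(&zone);  // no matching delete needed
      return oracle->zone() == &zone ? 0 : 1;
    }

Deriving the oracle from ZoneObject is what lets the compiler pipeline discard it wholesale when the compilation zone is torn down.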
diff --git a/src/3rdparty/v8/src/unicode-inl.h b/src/3rdparty/v8/src/unicode-inl.h
index 9c0ebf9..ec9c69f 100644
--- a/src/3rdparty/v8/src/unicode-inl.h
+++ b/src/3rdparty/v8/src/unicode-inl.h
@@ -29,6 +29,7 @@
#define V8_UNICODE_INL_H_
#include "unicode.h"
+#include "checks.h"
namespace unibrow {
@@ -144,6 +145,7 @@ uchar CharacterStream::GetNext() {
} else {
remaining_--;
}
+ ASSERT(BoundsCheck(cursor_));
return result;
}
diff --git a/src/3rdparty/v8/src/unicode.h b/src/3rdparty/v8/src/unicode.h
index 94ab1b4..00227c2 100644
--- a/src/3rdparty/v8/src/unicode.h
+++ b/src/3rdparty/v8/src/unicode.h
@@ -28,7 +28,9 @@
#ifndef V8_UNICODE_H_
#define V8_UNICODE_H_
+#ifndef _WIN32_WCE
#include <sys/types.h>
+#endif
/**
* \file
@@ -201,6 +203,7 @@ class CharacterStream {
protected:
virtual void FillBuffer() = 0;
+ virtual bool BoundsCheck(unsigned offset) = 0;
// The number of characters left in the current buffer
unsigned remaining_;
// The current offset within the buffer
@@ -228,6 +231,9 @@ class InputBuffer : public CharacterStream {
InputBuffer() { }
explicit InputBuffer(Input input) { Reset(input); }
virtual void FillBuffer();
+ virtual bool BoundsCheck(unsigned offset) {
+ return (buffer_ != util_buffer_) || (offset < kSize);
+ }
// A custom offset that can be used by the string implementation to
// mark progress within the encoded string.
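The two unicode hunks above add a BoundsCheck hook: the base stream asserts it on every GetNext(), and each concrete buffer supplies the capacity test. A hedged sketch of the pattern, with a hypothetical FixedBuffer standing in for InputBuffer:

    #include <cassert>

    class CharacterStream {
     public:
      virtual ~CharacterStream() {}
      unsigned GetNext() {
        unsigned result = buffer_[cursor_++];
        assert(BoundsCheck(cursor_));  // compiled out in release builds
        return result;
      }
     protected:
      // Only the concrete buffer knows its own capacity.
      virtual bool BoundsCheck(unsigned offset) = 0;
      const unsigned char* buffer_;
      unsigned cursor_;
    };

    class FixedBuffer : public CharacterStream {
     public:
      FixedBuffer(const unsigned char* data, unsigned size) : size_(size) {
        buffer_ = data;
        cursor_ = 0;
      }
     protected:
      virtual bool BoundsCheck(unsigned offset) { return offset <= size_; }
     private:
      unsigned size_;
    };

    int main() {
      const unsigned char data[] = "abc";
      FixedBuffer stream(data, 3);
      return stream.GetNext() == 'a' ? 0 : 1;
    }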
diff --git a/src/3rdparty/v8/src/utils.h b/src/3rdparty/v8/src/utils.h
index f116c14..e03f96f 100644
--- a/src/3rdparty/v8/src/utils.h
+++ b/src/3rdparty/v8/src/utils.h
@@ -248,6 +248,7 @@ class BitField {
// bitfield without compiler warnings we have to compute 2^32 without
// using a shift count of 32.
static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
+ static const uint32_t kShift = shift;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
@@ -861,7 +862,11 @@ class EmbeddedContainer {
public:
EmbeddedContainer() : elems_() { }
- int length() { return NumElements; }
+ int length() const { return NumElements; }
+ const ElementType& operator[](int i) const {
+ ASSERT(i < length());
+ return elems_[i];
+ }
ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
@@ -875,7 +880,12 @@ class EmbeddedContainer {
template<typename ElementType>
class EmbeddedContainer<ElementType, 0> {
public:
- int length() { return 0; }
+ int length() const { return 0; }
+ const ElementType& operator[](int i) const {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
ElementType& operator[](int i) {
UNREACHABLE();
static ElementType t = 0;
@@ -973,13 +983,59 @@ class EnumSet {
T Mask(E element) const {
// The strange typing in ASSERT is necessary to avoid stupid warnings, see:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
- ASSERT(element < static_cast<int>(sizeof(T) * CHAR_BIT));
+ ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
return 1 << element;
}
T bits_;
};
+
+class TypeFeedbackId {
+ public:
+ explicit TypeFeedbackId(int id) : id_(id) { }
+ int ToInt() const { return id_; }
+
+ static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
+ bool IsNone() const { return id_ == kNoneId; }
+
+ private:
+ static const int kNoneId = -1;
+
+ int id_;
+};
+
+
+class BailoutId {
+ public:
+ explicit BailoutId(int id) : id_(id) { }
+ int ToInt() const { return id_; }
+
+ static BailoutId None() { return BailoutId(kNoneId); }
+ static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
+ static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
+ static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+
+ bool IsNone() const { return id_ == kNoneId; }
+ bool operator==(const BailoutId& other) const { return id_ == other.id_; }
+
+ private:
+ static const int kNoneId = -1;
+
+ // Using 0 could disguise errors.
+ static const int kFunctionEntryId = 2;
+
+ // This AST id identifies the point after the declarations have been visited.
+ // We need it to capture the environment effects of declarations that emit
+ // code (function declarations).
+ static const int kDeclarationsId = 3;
+
+ // Every FunctionState starts with this id.
+ static const int kFirstUsableId = 4;
+
+ int id_;
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
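TypeFeedbackId and BailoutId replace the raw unsigned ids used throughout the oracle, so the compiler now rejects mixing the two id spaces. A small sketch of what the explicit constructor buys; SetInfo is a hypothetical consumer, not a V8 signature:

    #include <cstdio>

    class TypeFeedbackId {
     public:
      explicit TypeFeedbackId(int id) : id_(id) {}
      int ToInt() const { return id_; }
      static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
      bool IsNone() const { return id_ == kNoneId; }
     private:
      static const int kNoneId = -1;
      int id_;
    };

    void SetInfo(TypeFeedbackId id) { std::printf("id=%d\n", id.ToInt()); }

    int main() {
      SetInfo(TypeFeedbackId(42));  // ok
      // SetInfo(42);               // rejected: the constructor is explicit
      // SetInfo(BailoutId(42));    // rejected: distinct, unrelated types
      return 0;
    }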
diff --git a/src/3rdparty/v8/src/v8-counters.cc b/src/3rdparty/v8/src/v8-counters.cc
index c6aa9cb..3f83dff 100644
--- a/src/3rdparty/v8/src/v8-counters.cc
+++ b/src/3rdparty/v8/src/v8-counters.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,11 +34,23 @@ namespace internal {
Counters::Counters() {
#define HT(name, caption) \
- HistogramTimer name = { #caption, NULL, false, 0, 0 }; \
+ HistogramTimer name = { {#caption, 0, 10000, 50, NULL, false}, 0, 0 }; \
name##_ = name;
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram name = { #caption, 0, 101, 100, NULL, false }; \
+ name##_ = name;
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram name = { #caption, 1000, 500000, 50, NULL, false }; \
+ name##_ = name;
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter name = { "c:" #caption, NULL, false };\
name##_ = name;
@@ -47,6 +59,34 @@ Counters::Counters() {
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter count_of_##name = { "c:" "V8.CountOf_" #name, NULL, false };\
+ count_of_##name##_ = count_of_##name; \
+ StatsCounter size_of_##name = { "c:" "V8.SizeOf_" #name, NULL, false };\
+ size_of_##name##_ = size_of_##name;
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter count_of_CODE_TYPE_##name = { \
+ "c:" "V8.CountOf_CODE_TYPE-" #name, NULL, false }; \
+ count_of_CODE_TYPE_##name##_ = count_of_CODE_TYPE_##name; \
+ StatsCounter size_of_CODE_TYPE_##name = { \
+ "c:" "V8.SizeOf_CODE_TYPE-" #name, NULL, false }; \
+ size_of_CODE_TYPE_##name##_ = size_of_CODE_TYPE_##name;
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter count_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \
+ count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \
+ StatsCounter size_of_FIXED_ARRAY_##name = { \
+ "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \
+ size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
StatsCounter state_counters[] = {
#define COUNTER_NAME(name) \
{ "c:V8.State" #name, NULL, false },
@@ -59,4 +99,18 @@ Counters::Counters() {
}
}
+void Counters::ResetHistograms() {
+#define HT(name, caption) name##_.Reset();
+ HISTOGRAM_TIMER_LIST(HT)
+#undef HT
+
+#define HP(name, caption) name##_.Reset();
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) name##_.Reset();
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+}
+
} } // namespace v8::internal
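The constructor above leans entirely on the X-macro technique: one list macro is expanded with different definitions of HT/HP/HM/SC to generate the field declarations, their initialization, and (in the header) their accessors. A compilable miniature of the pattern, with made-up histogram names:

    #include <cstdio>

    #define HISTOGRAM_LIST(HM)                            \
      HM(heap_sample_total_committed, "TotalCommitted")   \
      HM(heap_sample_total_used, "TotalUsed")

    struct Histogram { const char* caption; int min, max, buckets; };

    struct Counters {
      Counters() {
    #define HM(name, caption) name##_ = Histogram{caption, 1000, 500000, 50};
        HISTOGRAM_LIST(HM)
    #undef HM
      }
    #define HM(name, caption) Histogram name##_;
      HISTOGRAM_LIST(HM)
    #undef HM
    };

    int main() {
      Counters c;
      std::printf("%s\n", c.heap_sample_total_used_.caption);  // TotalUsed
    }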
diff --git a/src/3rdparty/v8/src/v8-counters.h b/src/3rdparty/v8/src/v8-counters.h
index 6db9c77..fad3454 100644
--- a/src/3rdparty/v8/src/v8-counters.h
+++ b/src/3rdparty/v8/src/v8-counters.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "counters.h"
+#include "objects.h"
#include "v8globals.h"
namespace v8 {
@@ -50,6 +51,36 @@ namespace internal {
HT(compile_lazy, V8.CompileLazy)
+#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ HP(external_fragmentation_total, \
+ V8.MemoryExternalFragmentationTotal) \
+ HP(external_fragmentation_old_pointer_space, \
+ V8.MemoryExternalFragmentationOldPointerSpace) \
+ HP(external_fragmentation_old_data_space, \
+ V8.MemoryExternalFragmentationOldDataSpace) \
+ HP(external_fragmentation_code_space, \
+ V8.MemoryExternalFragmentationCodeSpace) \
+ HP(external_fragmentation_map_space, \
+ V8.MemoryExternalFragmentationMapSpace) \
+ HP(external_fragmentation_cell_space, \
+ V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_lo_space, \
+ V8.MemoryExternalFragmentationLoSpace) \
+ HP(heap_fraction_map_space, \
+ V8.MemoryHeapFractionMapSpace) \
+ HP(heap_fraction_cell_space, \
+ V8.MemoryHeapFractionCellSpace) \
+
+
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
+ HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
+ HM(heap_sample_map_space_committed, \
+ V8.MemoryHeapSampleMapSpaceCommitted) \
+ HM(heap_sample_cell_space_committed, \
+ V8.MemoryHeapSampleCellSpaceCommitted)
+
+
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
// lines) rather than one macro (of length about 80 lines) to work around
@@ -210,6 +241,9 @@ namespace internal {
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
+ SC(fast_new_closure_total, V8.FastNewClosureTotal) \
+ SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
+ SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
SC(string_add_runtime, V8.StringAddRuntime) \
SC(string_add_native, V8.StringAddNative) \
SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii) \
@@ -240,14 +274,33 @@ namespace internal {
SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(other_ticks, V8.OtherTicks) \
- SC(js_opt_ticks, V8.JsOptTicks) \
- SC(js_non_opt_ticks, V8.JsNonoptTicks) \
- SC(js_other_ticks, V8.JsOtherTicks) \
SC(smi_checks_removed, V8.SmiChecksRemoved) \
SC(map_checks_removed, V8.MapChecksRemoved) \
SC(quote_json_char_count, V8.QuoteJsonCharacterCount) \
- SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)
+ SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount) \
+ SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
+ SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
+ SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
+ SC(old_pointer_space_bytes_available, \
+ V8.MemoryOldPointerSpaceBytesAvailable) \
+ SC(old_pointer_space_bytes_committed, \
+ V8.MemoryOldPointerSpaceBytesCommitted) \
+ SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
+ SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
+ SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
+ SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed) \
+ SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable) \
+ SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted) \
+ SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed) \
+ SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable) \
+ SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted) \
+ SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
+ SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
+ SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
+ SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
+ SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
+ SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
+ SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
// This file contains all the v8 counters that are in use.
@@ -258,20 +311,69 @@ class Counters {
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram* name() { return &name##_; }
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter* name() { return &name##_; }
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_##name() { return &count_of_##name##_; } \
+ StatsCounter* size_of_##name() { return &size_of_##name##_; }
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_CODE_TYPE_##name() \
+ { return &count_of_CODE_TYPE_##name##_; } \
+ StatsCounter* size_of_CODE_TYPE_##name() \
+ { return &size_of_CODE_TYPE_##name##_; }
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter* count_of_FIXED_ARRAY_##name() \
+ { return &count_of_FIXED_ARRAY_##name##_; } \
+ StatsCounter* size_of_FIXED_ARRAY_##name() \
+ { return &size_of_FIXED_ARRAY_##name##_; }
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
#undef RATE_ID
+#define PERCENTAGE_ID(name, caption) k_##name,
+ HISTOGRAM_PERCENTAGE_LIST(PERCENTAGE_ID)
+#undef PERCENTAGE_ID
+#define MEMORY_ID(name, caption) k_##name,
+ HISTOGRAM_MEMORY_LIST(MEMORY_ID)
+#undef MEMORY_ID
#define COUNTER_ID(name, caption) k_##name,
STATS_COUNTER_LIST_1(COUNTER_ID)
STATS_COUNTER_LIST_2(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
+ INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_TYPE_##name, \
+ kSizeOfCODE_TYPE_##name,
+ CODE_KIND_LIST(COUNTER_ID)
+#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfFIXED_ARRAY__##name, \
+ kSizeOfFIXED_ARRAY__##name,
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
+#undef COUNTER_ID
#define COUNTER_ID(name) k_##name,
STATE_TAG_LIST(COUNTER_ID)
#undef COUNTER_ID
@@ -282,18 +384,48 @@ class Counters {
return &state_counters_[state];
}
+ void ResetHistograms();
+
private:
#define HT(name, caption) \
HistogramTimer name##_;
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HP(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_PERCENTAGE_LIST(HP)
+#undef HP
+
+#define HM(name, caption) \
+ Histogram name##_;
+ HISTOGRAM_MEMORY_LIST(HM)
+#undef HM
+
#define SC(name, caption) \
StatsCounter name##_;
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name) \
+ StatsCounter size_of_##name##_; \
+ StatsCounter count_of_##name##_;
+ INSTANCE_TYPE_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_CODE_TYPE_##name##_; \
+ StatsCounter count_of_CODE_TYPE_##name##_;
+ CODE_KIND_LIST(SC)
+#undef SC
+
+#define SC(name) \
+ StatsCounter size_of_FIXED_ARRAY_##name##_; \
+ StatsCounter count_of_FIXED_ARRAY_##name##_;
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#undef SC
+
enum {
#define COUNTER_ID(name) __##name,
STATE_TAG_LIST(COUNTER_ID)
diff --git a/src/3rdparty/v8/src/v8.cc b/src/3rdparty/v8/src/v8.cc
index 2910a07..7d01582 100644
--- a/src/3rdparty/v8/src/v8.cc
+++ b/src/3rdparty/v8/src/v8.cc
@@ -38,6 +38,7 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "log.h"
+#include "objects.h"
#include "once.h"
#include "platform.h"
#include "runtime-profiler.h"
@@ -106,13 +107,16 @@ void V8::TearDown() {
if (!has_been_set_up_ || has_been_disposed_) return;
+ // The isolate has to be torn down before clearing the LOperand
+ // caches so that the optimizing compiler thread (if running)
+ // doesn't see an inconsistent view of the lithium instructions.
+ isolate->TearDown();
+ delete isolate;
+
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
RegisteredExtension::UnregisterAll();
- isolate->TearDown();
- delete isolate;
-
is_running_ = false;
has_been_disposed_ = true;
@@ -166,7 +170,7 @@ void V8::SetReturnAddressLocationResolver(
// Used by JavaScript APIs
uint32_t V8::Random(Context* context) {
- ASSERT(context->IsGlobalContext());
+ ASSERT(context->IsNativeContext());
ByteArray* seed = context->random_seed();
return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
}
@@ -213,14 +217,22 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
- if (call_completed_callbacks_ == NULL) return;
+ bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
+ bool observer_delivery_pending =
+ FLAG_harmony_observation && isolate->observer_delivery_pending();
+ if (!has_call_completed_callbacks && !observer_delivery_pending) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- call_completed_callbacks_->at(i)();
+ if (observer_delivery_pending) {
+ JSObject::DeliverChangeRecords(isolate);
+ }
+ if (has_call_completed_callbacks) {
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ call_completed_callbacks_->at(i)();
+ }
}
handle_scope_implementer->DecrementCallDepth();
}
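The FireCallCompletedCallback change adds observer delivery, but keeps the existing reentrancy guard: work happens only when the call depth is zero, and the depth is raised while callbacks run so a callback that re-enters the VM cannot fire the list again. A simplified sketch of the guard (in the real code the depth lives in HandleScopeImplementer):

    #include <cstdio>
    #include <functional>
    #include <vector>

    class CallCompletedFirer {
     public:
      void FireIfOutermost(const std::vector<std::function<void()> >& callbacks) {
        if (depth_ != 0) return;  // nested: only the outermost call fires
        ++depth_;                 // block recursive re-entry
        for (size_t i = 0; i < callbacks.size(); ++i) callbacks[i]();
        --depth_;
      }
     private:
      int depth_ = 0;
    };

    int main() {
      CallCompletedFirer firer;
      std::vector<std::function<void()> > callbacks;
      callbacks.push_back([] { std::puts("call completed"); });
      firer.FireIfOutermost(callbacks);  // prints exactly once
    }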
diff --git a/src/3rdparty/v8/src/v8.h b/src/3rdparty/v8/src/v8.h
index 59ce602..201cdf2 100644
--- a/src/3rdparty/v8/src/v8.h
+++ b/src/3rdparty/v8/src/v8.h
@@ -48,6 +48,11 @@
#error both DEBUG and NDEBUG are set
#endif
+// For Windows CE, Windows headers need to be included first as they define ASSERT
+#ifdef _WIN32_WCE
+# include "win32-headers.h"
+#endif
+
// Basic includes
#include "../include/v8.h"
#include "v8globals.h"
@@ -65,6 +70,7 @@
#include "log-inl.h"
#include "cpu-profiler-inl.h"
#include "handles-inl.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
diff --git a/src/3rdparty/v8/src/v8globals.h b/src/3rdparty/v8/src/v8globals.h
index 6a1766a..95390ad 100644
--- a/src/3rdparty/v8/src/v8globals.h
+++ b/src/3rdparty/v8/src/v8globals.h
@@ -52,15 +52,6 @@ const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for maps.
-#if V8_HOST_ARCH_64_BIT
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
-#else
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
-#endif
-const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
-const intptr_t kMapAlignmentMask = kMapAlignment - 1;
-
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
@@ -94,6 +85,7 @@ const uint32_t kDebugZapValue = 0xbadbaddb;
const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
+const int kCodeZapValue = 0xbadc0de;
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb per page.
@@ -126,6 +118,7 @@ class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
+class TransitionArray;
class ExternalReference;
class FixedArray;
class FunctionTemplateInfo;
@@ -311,14 +304,6 @@ typedef void (*StoreBufferCallback)(Heap* heap,
StoreBufferEvent event);
-// Whether to remove map transitions and constant transitions from a
-// DescriptorArray.
-enum TransitionFlag {
- REMOVE_TRANSITIONS,
- KEEP_TRANSITIONS
-};
-
-
// Union used for fast testing of specific double values.
union DoubleRepresentation {
double value;
@@ -366,11 +351,12 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(OTHER) \
+#define STATE_TAG_LIST(V) \
+ V(JS) \
+ V(GC) \
+ V(COMPILER) \
+ V(PARALLEL_COMPILER_PROLOGUE) \
+ V(OTHER) \
V(EXTERNAL)
enum StateTag {
@@ -401,10 +387,6 @@ enum StateTag {
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
-#define MAP_POINTER_ALIGN(value) \
- (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
-
// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value) \
(((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
@@ -430,6 +412,13 @@ enum StateTag {
#endif
+enum CpuImplementer {
+ UNKNOWN_IMPLEMENTER,
+ ARM_IMPLEMENTER,
+ QUALCOMM_IMPLEMENTER
+};
+
+
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
@@ -442,6 +431,10 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
+ VFP2 = 3, // ARM
+ SUDIV = 4, // ARM
+ UNALIGNED_ACCESSES = 5, // ARM
+ MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
@@ -483,16 +476,17 @@ const uint64_t kLastNonNaNInt64 =
(static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
+// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
// User declared variables:
VAR, // declared via 'var', and 'function' declarations
CONST, // declared via 'const' declarations
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
-
LET, // declared via 'let' declarations
+ CONST_HARMONY, // declared via 'const' declarations in harmony mode
+
// Variables introduced by the compiler:
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
@@ -514,6 +508,26 @@ enum VariableMode {
};
+inline bool IsDynamicVariableMode(VariableMode mode) {
+ return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
+}
+
+
+inline bool IsDeclaredVariableMode(VariableMode mode) {
+ return mode >= VAR && mode <= CONST_HARMONY;
+}
+
+
+inline bool IsLexicalVariableMode(VariableMode mode) {
+ return mode >= LET && mode <= CONST_HARMONY;
+}
+
+
+inline bool IsImmutableVariableMode(VariableMode mode) {
+ return mode == CONST || mode == CONST_HARMONY;
+}
+
+
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
// uninitialized. In ES5 only immutable bindings have these two states. When
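The enum reorder exists so that each predicate above is a single range check; that is the sync contract the new comment warns about. A sketch that pins the adjacency down at compile time; the enumerator list is abbreviated relative to the real one:

    enum VariableMode {
      VAR, CONST, LET, CONST_HARMONY,          // user-declared; lexical last
      DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL,  // compiler-introduced
      TEMPORARY
    };

    // A reordering now breaks the build instead of the VM.
    static_assert(LET + 1 == CONST_HARMONY, "lexical modes must be adjacent");
    static_assert(DYNAMIC + 2 == DYNAMIC_LOCAL, "dynamic modes must be adjacent");

    inline bool IsLexicalVariableMode(VariableMode mode) {
      return mode >= LET && mode <= CONST_HARMONY;
    }

    inline bool IsDynamicVariableMode(VariableMode mode) {
      return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
    }

    int main() { return IsLexicalVariableMode(LET) ? 0 : 1; }

(V8 itself would use STATIC_ASSERT; static_assert is the C++11 spelling.)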
diff --git a/src/3rdparty/v8/src/v8natives.js b/src/3rdparty/v8/src/v8natives.js
index f1e8084..20fc74d 100644
--- a/src/3rdparty/v8/src/v8natives.js
+++ b/src/3rdparty/v8/src/v8natives.js
@@ -60,7 +60,17 @@ function InstallFunctions(object, attributes, functions) {
%ToFastProperties(object);
}
-// Prevents changes to the prototype of a built-infunction.
+
+// Helper function to install a getter-only property.
+function InstallGetter(object, name, getter) {
+ %FunctionSetName(getter, name);
+ %FunctionRemovePrototype(getter);
+ %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM);
+ %SetNativeFlag(getter);
+}
+
+
+// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
// changing the __proto__ property.
@@ -337,7 +347,7 @@ function ObjectKeys(obj) {
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
var names = CallTrap0(handler, "keys", DerivedKeysTrap);
- return ToStringArray(names);
+ return ToStringArray(names, "keys");
}
return %LocalKeys(obj);
}
@@ -963,7 +973,7 @@ function ToStringArray(obj, trap) {
var names = {}; // TODO(rossberg): use sets once they are ready.
for (var index = 0; index < n; index++) {
var s = ToString(obj[index]);
- if (s in names) {
+ if (%HasLocalProperty(names, s)) {
throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
}
array[index] = s;
@@ -1654,7 +1664,9 @@ function NewFunction(arg1) { // length == 1
// The call to SetNewFunctionAttributes will ensure the prototype
// property of the resulting function is enumerable (ECMA262, 15.3.5.2).
- var f = %CompileString(source)();
+ var global_receiver = %GlobalReceiver(global);
+ var f = %_CallFunction(global_receiver, %CompileString(source));
+
%FunctionMarkNameShouldPrintAsAnonymous(f);
return %SetNewFunctionAttributes(f);
}
diff --git a/src/3rdparty/v8/src/v8threads.cc b/src/3rdparty/v8/src/v8threads.cc
index fd8d536..32ea5e1 100644
--- a/src/3rdparty/v8/src/v8threads.cc
+++ b/src/3rdparty/v8/src/v8threads.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -238,12 +238,18 @@ static int ArchiveSpacePerThread() {
ThreadState::ThreadState(ThreadManager* thread_manager)
: id_(ThreadId::Invalid()),
terminate_on_restore_(false),
+ data_(NULL),
next_(this),
previous_(this),
thread_manager_(thread_manager) {
}
+ThreadState::~ThreadState() {
+ DeleteArray<char>(data_);
+}
+
+
void ThreadState::AllocateSpace() {
data_ = NewArray<char>(ArchiveSpacePerThread());
}
@@ -306,8 +312,19 @@ ThreadManager::ThreadManager()
ThreadManager::~ThreadManager() {
delete mutex_;
- delete free_anchor_;
- delete in_use_anchor_;
+ DeleteThreadStateList(free_anchor_);
+ DeleteThreadStateList(in_use_anchor_);
+}
+
+
+void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
+ // The list starts and ends with the anchor.
+ for (ThreadState* current = anchor->next_; current != anchor;) {
+ ThreadState* next = current->next_;
+ delete current;
+ current = next;
+ }
+ delete anchor;
}
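DeleteThreadStateList walks a circular doubly-linked list that starts and ends at a sentinel anchor, so the loop terminates when the anchor comes back around and the anchor itself is freed last. The same shape in miniature:

    struct Node {
      Node* next = this;  // a lone node, like the anchor, points at itself
      Node* prev = this;
    };

    void DeleteList(Node* anchor) {
      for (Node* current = anchor->next; current != anchor;) {
        Node* next = current->next;  // save the link before freeing
        delete current;
        current = next;
      }
      delete anchor;  // the sentinel goes last
    }

    int main() {
      Node* anchor = new Node;
      Node* element = new Node;
      element->next = anchor;  element->prev = anchor;  // splice in one node
      anchor->next = element;  anchor->prev = element;
      DeleteList(anchor);  // frees both; the anchor is not deleted twice
    }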
diff --git a/src/3rdparty/v8/src/v8threads.h b/src/3rdparty/v8/src/v8threads.h
index a2aee4e..8dce860 100644
--- a/src/3rdparty/v8/src/v8threads.h
+++ b/src/3rdparty/v8/src/v8threads.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -57,6 +57,7 @@ class ThreadState {
private:
explicit ThreadState(ThreadManager* thread_manager);
+ ~ThreadState();
void AllocateSpace();
@@ -114,6 +115,8 @@ class ThreadManager {
ThreadManager();
~ThreadManager();
+ void DeleteThreadStateList(ThreadState* anchor);
+
void EagerlyArchiveThread();
Mutex* mutex_;
diff --git a/src/3rdparty/v8/src/v8utils.cc b/src/3rdparty/v8/src/v8utils.cc
index 042a60f..4ab97ed 100644
--- a/src/3rdparty/v8/src/v8utils.cc
+++ b/src/3rdparty/v8/src/v8utils.cc
@@ -31,7 +31,9 @@
#include "platform.h"
+#ifndef _WIN32_WCE
#include "sys/stat.h"
+#endif
namespace v8 {
namespace internal {
@@ -45,7 +47,7 @@ void PrintF(const char* format, ...) {
}
-void PrintF(FILE* out, const char* format, ...) {
+void FPrintF(FILE* out, const char* format, ...) {
va_list arguments;
va_start(arguments, format);
OS::VFPrint(out, format, arguments);
@@ -53,6 +55,15 @@ void PrintF(FILE* out, const char* format, ...) {
}
+void PrintPID(const char* format, ...) {
+ OS::Print("[%d] ", OS::GetCurrentProcessId());
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
+
void Flush(FILE* out) {
fflush(out);
}
@@ -124,7 +135,11 @@ char* ReadCharsFromFile(FILE* file,
// Get the size of the file and rewind it.
*size = ftell(file);
+#ifdef _WIN32_WCE
+ fseek(file, 0, SEEK_SET);
+#else
rewind(file);
+#endif // _WIN32_WCE
char* result = NewArray<char>(*size + extra_space);
for (int i = 0; i < *size && feof(file) == 0;) {
diff --git a/src/3rdparty/v8/src/v8utils.h b/src/3rdparty/v8/src/v8utils.h
index bb587e1..cb018e6 100644
--- a/src/3rdparty/v8/src/v8utils.h
+++ b/src/3rdparty/v8/src/v8utils.h
@@ -55,7 +55,10 @@ namespace internal {
// Our version of printf().
void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
+void FPRINTF_CHECKING FPrintF(FILE* out, const char* format, ...);
+
+// Prepends the current process ID to the output.
+void PRINTF_CHECKING PrintPID(const char* format, ...);
// Our version of fflush.
void Flush(FILE* out);
diff --git a/src/3rdparty/v8/src/variables.cc b/src/3rdparty/v8/src/variables.cc
index 4984cbd..3e735d6 100644
--- a/src/3rdparty/v8/src/variables.cc
+++ b/src/3rdparty/v8/src/variables.cc
@@ -41,7 +41,7 @@ const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
- case CONST_HARMONY: return "CONST";
+ case CONST_HARMONY: return "CONST_HARMONY";
case LET: return "LET";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -82,10 +82,11 @@ Variable::Variable(Scope* scope,
}
-bool Variable::is_global() const {
+bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
+ return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_)
+ && scope_ != NULL && scope_->is_global_scope();
}
diff --git a/src/3rdparty/v8/src/variables.h b/src/3rdparty/v8/src/variables.h
index 43b2c81..d4e851b 100644
--- a/src/3rdparty/v8/src/variables.h
+++ b/src/3rdparty/v8/src/variables.h
@@ -55,7 +55,7 @@ class Variable: public ZoneObject {
UNALLOCATED,
// A slot in the parameter section on the stack. index() is the
- // parameter index, counting left-to-right. The reciever is index -1;
+ // parameter index, counting left-to-right. The receiver is index -1;
// the first parameter is index 0.
PARAMETER,
@@ -118,21 +118,14 @@ class Variable: public ZoneObject {
bool IsStackAllocated() const { return IsParameter() || IsStackLocal(); }
bool IsContextSlot() const { return location_ == CONTEXT; }
bool IsLookupSlot() const { return location_ == LOOKUP; }
+ bool IsGlobalObjectProperty() const;
- bool is_dynamic() const {
- return (mode_ == DYNAMIC ||
- mode_ == DYNAMIC_GLOBAL ||
- mode_ == DYNAMIC_LOCAL);
- }
- bool is_const_mode() const {
- return (mode_ == CONST ||
- mode_ == CONST_HARMONY);
- }
+ bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
+ bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
bool binding_needs_init() const {
return initialization_flag_ == kNeedsInitialization;
}
- bool is_global() const;
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
diff --git a/src/3rdparty/v8/src/version.cc b/src/3rdparty/v8/src/version.cc
index 79b6ebd..3132b5e 100644
--- a/src/3rdparty/v8/src/version.cc
+++ b/src/3rdparty/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 11
-#define BUILD_NUMBER 4
+#define MINOR_VERSION 15
+#define BUILD_NUMBER 2
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/3rdparty/v8/src/vm-state-inl.h b/src/3rdparty/v8/src/vm-state-inl.h
index c647e56..384940d 100644
--- a/src/3rdparty/v8/src/vm-state-inl.h
+++ b/src/3rdparty/v8/src/vm-state-inl.h
@@ -47,6 +47,8 @@ inline const char* StateToString(StateTag state) {
return "GC";
case COMPILER:
return "COMPILER";
+ case PARALLEL_COMPILER_PROLOGUE:
+ return "PARALLEL_COMPILER_PROLOGUE";
case OTHER:
return "OTHER";
case EXTERNAL:
diff --git a/src/3rdparty/v8/src/win32-headers.h b/src/3rdparty/v8/src/win32-headers.h
index 5d9c89e..b476efe 100644
--- a/src/3rdparty/v8/src/win32-headers.h
+++ b/src/3rdparty/v8/src/win32-headers.h
@@ -56,7 +56,9 @@
#include <windows.h>
#ifdef V8_WIN32_HEADERS_FULL
+#ifndef _WIN32_WCE
#include <signal.h> // For raise().
+#endif // _WIN32_WCE
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
#ifdef __MINGW32__
@@ -65,10 +67,10 @@
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x501
#endif // __MINGW32__
-#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#if (!defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)) && !defined(_WIN32_WCE)
#include <dbghelp.h> // For SymLoadModule64 and al.
#include <errno.h> // For STRUNCATE
-#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#endif // (!defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)) && !defined(_WIN32_WCE)
#include <limits.h> // For INT_MAX and al.
#include <tlhelp32.h> // For Module32First and al.
@@ -76,13 +78,26 @@
// makes it impossible to have them elsewhere.
#include <winsock2.h>
#include <ws2tcpip.h>
-#ifndef __MINGW32__
+#if !defined(__MINGW32__) && !defined(_WIN32_WCE)
#include <wspiapi.h>
-#endif // __MINGW32__
+#endif // !defined(__MINGW32__) && !defined(_WIN32_WCE)
+#ifndef _WIN32_WCE
#include <process.h> // For _beginthreadex().
+#endif
#include <stdlib.h>
#endif // V8_WIN32_HEADERS_FULL
+#ifdef _WIN32_WCE
+#ifdef DebugBreak
+#undef DebugBreak
+inline void DebugBreak() { __debugbreak(); };
+#endif // DebugBreak
+
+#ifndef _IOFBF
+#define _IOFBF 0x0000
+#endif
+#endif
+
#undef VOID
#undef DELETE
#undef IN
@@ -98,3 +113,4 @@
#undef GetObject
#undef CreateMutex
#undef CreateSemaphore
+#undef interface
diff --git a/src/3rdparty/v8/src/x64/assembler-x64-inl.h b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
index a9cc2ef..f864174 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64-inl.h
+++ b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
@@ -42,6 +42,9 @@ namespace internal {
// Implementation of Assembler
+static const byte kCallOpcode = 0xE8;
+
+
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -65,10 +68,10 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
} else {
RecordRelocInfo(rmode);
}
@@ -195,6 +198,12 @@ void Assembler::set_target_address_at(Address pc, Address target) {
CPU::FlushICache(pc, sizeof(int32_t));
}
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
@@ -211,6 +220,12 @@ void RelocInfo::apply(intptr_t delta) {
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
CPU::FlushICache(pc_, sizeof(int32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= static_cast<int32_t>(delta); // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
+ }
}
}
@@ -309,10 +324,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- Object* object = HeapObject::FromAddress(
- address - JSGlobalPropertyCell::kValueOffset);
- return reinterpret_cast<JSGlobalPropertyCell*>(object);
+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}
@@ -352,6 +364,21 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -405,6 +432,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -433,6 +462,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
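The new CODE_AGE_SEQUENCE branch in RelocInfo::apply patches the rel32 operand of the 0xE8 call that marks aged code. A rel32 is measured from the end of the call instruction, so when the code object moves by delta while the stub stays put, the stored displacement has to shrink by delta. A hedged sketch of just that arithmetic:

    #include <cstdint>
    #include <cstring>

    const uint8_t kCallOpcode = 0xE8;

    void RelocateCall(uint8_t* pc, intptr_t delta) {
      if (*pc != kCallOpcode) return;  // not a call site; nothing to patch
      int32_t disp;
      std::memcpy(&disp, pc + 1, sizeof(disp));  // unaligned-safe read
      disp -= static_cast<int32_t>(delta);       // target fixed, code moved
      std::memcpy(pc + 1, &disp, sizeof(disp));
      // The real code also flushes the instruction cache afterwards.
    }

    int main() {
      uint8_t code[5] = {0xE8, 0x10, 0x00, 0x00, 0x00};  // call +0x10
      RelocateCall(code, 4);                             // code moved up by 4
      return code[1] == 0x0C ? 0 : 1;                    // now call +0x0C
    }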
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.cc b/src/3rdparty/v8/src/x64/assembler-x64.cc
index 2f0c542..1f5bea9 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/assembler-x64.cc
@@ -75,6 +75,7 @@ void CpuFeatures::Probe() {
// Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
+ __ push(rdi);
__ push(rcx);
__ push(rbx);
__ movq(rbp, rsp);
@@ -128,6 +129,7 @@ void CpuFeatures::Probe() {
__ movq(rsp, rbp);
__ pop(rbx);
__ pop(rcx);
+ __ pop(rdi);
__ popfq();
__ pop(rbp);
__ ret(0);
@@ -347,8 +349,7 @@ static void InitCoverageLog();
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
code_targets_(100),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ positions_recorder_(this) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -467,7 +468,7 @@ void Assembler::bind_to(Label* L, int pos) {
static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
ASSERT(offset_to_next <= 0);
int disp = pos - (fixup_pos + sizeof(int8_t));
- ASSERT(is_int8(disp));
+ CHECK(is_int8(disp));
set_byte_at(fixup_pos, disp);
if (offset_to_next < 0) {
L->link_to(fixup_pos + offset_to_next, Label::kNear);
@@ -875,7 +876,7 @@ void Assembler::call(Label* L) {
void Assembler::call(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
@@ -1232,7 +1233,16 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
const int long_size = 6;
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ // Determine whether we can use 1-byte offsets for backwards branches,
+ // which have a max range of 128 bytes.
+
+ // We also need to check the predictable_code_size() flag here, because on x64,
+ // when the full code generator recompiles code for debugging, some places
+ // need to be padded out to a certain size. The debugger is keeping track of
+ // how often it did this so that it can adjust return addresses on the
+ // stack, but if the size of jump instructions can also change, that's not
+ // enough and the calculated offsets would be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
@@ -1289,7 +1299,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
@@ -3035,7 +3045,8 @@ void Assembler::RecordComment(const char* msg, bool force) {
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
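The predictable_code_size() checks added to j() and jmp() disable the short encodings because choosing between a 2-byte and a 5- or 6-byte form changes instruction lengths, which would invalidate the debugger's return-address adjustments described in the comment. A simplified sketch of the choice for an unconditional backwards jmp:

    #include <cstdint>
    #include <vector>

    // offs is <= 0, measured from the start of the jmp being emitted.
    void EmitJump(std::vector<uint8_t>* code, int32_t offs,
                  bool predictable_code_size) {
      const int kShortSize = 2;  // EB disp8
      const int kLongSize = 5;   // E9 disp32
      int32_t short_disp = offs - kShortSize;
      if (short_disp >= -128 && short_disp <= 127 && !predictable_code_size) {
        code->push_back(0xEB);  // compact, but a different length
        code->push_back(static_cast<uint8_t>(short_disp));
      } else {
        int32_t disp = offs - kLongSize;
        code->push_back(0xE9);  // always 5 bytes, hence "predictable"
        for (int i = 0; i < 4; i++)
          code->push_back(static_cast<uint8_t>(disp >> (8 * i)));
      }
    }

    int main() {
      std::vector<uint8_t> code;
      EmitJump(&code, -10, true);  // predictable mode forces the long form
      return code.size() == 5 ? 0 : 1;
    }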
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
index 9f5f850..5f9e147 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/assembler-x64.h
@@ -455,6 +455,7 @@ class CpuFeatures : public AllStatic {
ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
+ if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
@@ -557,9 +558,6 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -575,6 +573,10 @@ class Assembler : public AssemblerBase {
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ static inline Address target_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -614,6 +616,7 @@ class Assembler : public AssemblerBase {
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
static const int kShortCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = 4;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
@@ -1010,6 +1013,14 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x1);
}
+ void rorl(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x1);
+ }
+
+ void rorl_cl(Register dst) {
+ shift_32(dst, 0x1);
+ }
+
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1201,7 +1212,7 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(Handle<Code> target,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
@@ -1430,9 +1441,6 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1451,7 +1459,7 @@ class Assembler : public AssemblerBase {
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1634,9 +1642,6 @@ class Assembler : public AssemblerBase {
List< Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
-
friend class PositionsRecorder;
};
diff --git a/src/3rdparty/v8/src/x64/builtins-x64.cc b/src/3rdparty/v8/src/x64/builtins-x64.cc
index 4e037ff..ed0ec68 100644
--- a/src/3rdparty/v8/src/x64/builtins-x64.cc
+++ b/src/3rdparty/v8/src/x64/builtins-x64.cc
@@ -73,6 +73,45 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ movq(kScratchRegister,
+ FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
+ __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ jmp(kScratchRegister);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@@ -567,6 +606,46 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ subq(Operand(rsp, 0), Immediate(5));
+ __ Pushad();
+#ifdef _WIN64
+ __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+#else
+ __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+#endif
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ }
+ __ Popad();
+ __ ret(0);
+}
+
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@@ -711,9 +790,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -896,9 +975,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movq(rbx, FieldOperand(rbx, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -977,7 +1056,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1076,7 +1155,8 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch, elements_array);
+ __ LoadInitialArrayMap(array_function, scratch,
+ elements_array, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ testq(array_size, array_size);
@@ -1303,10 +1383,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(call_generic_code);
__ bind(&not_double);
- // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// rbx: JSArray
__ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r11,
kScratchRegister,
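One detail worth calling out in GenerateMakeCodeYoungAgainCommon: the subq(Operand(rsp, 0), Immediate(5)) rewinds the saved return address by the 5-byte length of the patched call sequence, so that when the stub returns, execution lands back on the freshly restored young prologue and re-executes it. The arithmetic in isolation, purely illustrative:

    #include <cstdint>

    const int kPatchedSequenceLength = 5;  // E8 + rel32 on x64

    // return_slot stands in for Operand(rsp, 0), the saved return address.
    void RewindToYoungSequence(uintptr_t* return_slot) {
      *return_slot -= kPatchedSequenceLength;
    }

    int main() {
      uintptr_t slot = 100;
      RewindToYoungSequence(&slot);
      return slot == 95 ? 0 : 1;
    }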
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
index 7069829..06ce52a 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
@@ -62,9 +62,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
@@ -72,36 +76,113 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
+ __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
__ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
__ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
__ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ movq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &check_optimized, Label::kNear);
+ }
+ __ bind(&install_unoptimized);
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
+ rdi); // Initialize with undefined.
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+ // rcx holds the native context, rbx points to a fixed array of 3-element
+ // entries (native context, optimized code, literals).
+ // The optimized code map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move code object into rdx.
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
+ __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+ __ j(equal, &install_optimized);
+
+ // Iterate through the rest of the map backwards. rdx holds an index.
+ Label loop;
+ Label restore;
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ bind(&loop);
+ // Do not double check first entry.
+ __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+ __ j(equal, &restore);
+ __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
+ __ cmpq(rcx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &loop, Label::kNear);
+ // Hit: fetch the optimized code.
+ __ movq(rdx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+ // Now link a function into a list of optimized functions.
+ __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
+ // No need for write barrier as JSFunction (rax) is in the new space.
+
+ __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
+ // Store JSFunction (rax) into rdx before issuing write barrier as
+ // it clobbers all the registers passed.
+ __ movq(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
+ __ bind(&restore);
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&install_unoptimized);
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(rcx); // Temporarily remove return address.
@@ -136,12 +217,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
// Copy the qmlglobal object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)), rbx);
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -182,9 +263,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
@@ -194,7 +275,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(equal, message);
}
__ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
__ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
@@ -204,12 +285,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+ __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
// Copy the qmlglobal object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::QML_GLOBAL_INDEX), rbx);
+ __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX));
+ __ movq(ContextOperand(rax, Context::QML_GLOBAL_OBJECT_INDEX), rbx);
// Initialize the rest of the slots to the hole value.
__ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
@@ -1007,8 +1088,8 @@ void BinaryOpStub::GenerateSmiCode(
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// Arguments to BinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
+ const Register left = rdx;
+ const Register right = rax;
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
@@ -1050,20 +1131,16 @@ void BinaryOpStub::GenerateSmiCode(
case Token::DIV:
// SmiDiv will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
- __ SmiDiv(rax, left, right, &use_fp_on_smis);
+ __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::MOD:
// SmiMod will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
- __ SmiMod(rax, left, right, &use_fp_on_smis);
+ __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::BIT_OR: {
@@ -1228,11 +1305,9 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
&allocation_failed,
TAG_OBJECT);
// Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
@@ -1980,10 +2055,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
- if (FLAG_debug_code) {
- // Second should be non-smi if we get here.
- __ AbortIfSmi(second);
- }
+ __ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert second to smi, if possible.
@@ -2193,21 +2265,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg, while_true, no_multiply;
+ Label no_neg, while_true, while_false;
__ testl(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ negl(scratch);
__ bind(&no_neg);
- __ bind(&while_true);
+ __ j(zero, &while_false, Label::kNear);
__ shrl(scratch, Immediate(1));
- __ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ bind(&no_multiply);
+ // Above condition means CF==0 && ZF==0. This means that the
+ // bit that has been shifted out is 0 and the result is not 0.
+ __ j(above, &while_true, Label::kNear);
+ __ movsd(double_result, double_scratch);
+ __ j(zero, &while_false, Label::kNear);
+ __ bind(&while_true);
+ __ shrl(scratch, Immediate(1));
__ mulsd(double_scratch, double_scratch);
+ __ j(above, &while_true, Label::kNear);
+ __ mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
+ __ bind(&while_false);
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
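
// The flag-juggling loop above is binary exponentiation: double_scratch
// holds successive squarings of the base and double_result accumulates a
// factor for every 1-bit shifted out of the exponent. A plain C++ sketch
// of the same computation (overflow-safe abs; illustrative, not V8 code):
static double PowBySquaring(double base, int exponent) {
  unsigned bits = exponent < 0 ? -static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  double result = 1.0;     // double_result starts out as 1.
  double scratch = base;   // double_scratch holds base^(2^k).
  while (bits != 0) {
    if (bits & 1) result *= scratch;  // Multiply in the shifted-out bit.
    scratch *= scratch;               // Square for the next bit.
    bits >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;  // Negative: invert.
}
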
@@ -2387,10 +2466,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rax = address of new object(s) (tagged)
// rcx = argument count (untagged)
- // Get the arguments boilerplate from the current (global) context into rdi.
+ // Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
__ testq(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -2533,7 +2612,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2603,9 +2682,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rdi, offset));
@@ -2722,7 +2801,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
__ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ j(above, &runtime);
// rax: RegExp data (FixedArray)
@@ -2872,30 +2951,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
+ static const int kRegExpExecuteArguments = 9;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
__ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 8: Pass current isolate address.
+ // Argument 9: Pass current isolate address.
// __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
// Immediate(ExternalReference::isolate_address()));
__ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
kScratchRegister);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Indicate that this is a direct call from JavaScript.
__ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
- // Argument 6: Start (high end) of backtracking stack memory area.
+ // Argument 7: Start (high end) of backtracking stack memory area.
__ movq(kScratchRegister, address_of_regexp_stack_memory_address);
__ movq(r9, Operand(kScratchRegister, 0));
__ movq(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ // Argument 6 is passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ Immediate(0));
+#else
+ __ Set(r9, 0);
#endif
// Argument 5: static offsets vector buffer.
@@ -2903,7 +2989,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
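
// For orientation, the stack-passed arguments as this stub now arranges
// them (a sketch inferred from the comments above; parameter names are
// illustrative, not the actual NativeRegExpMacroAssembler signature):
//   arg 5: int* static_offsets_vector  // capture register buffer
//   arg 6: int num_capture_registers   // forced to 0 so global regexps
//                                      // behave as non-global
//   arg 7: byte* backtrack_stack_top   // high end of backtracking stack
//   arg 8: int direct_call             // 1 = called directly from JS
//   arg 9: Isolate* isolate
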
@@ -2968,7 +3054,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
Label exception;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmpl(rax, Immediate(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
__ j(equal, &success, Label::kNear);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
__ j(equal, &exception);
@@ -3125,8 +3213,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r8: Number of array elements as smi.
// Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
@@ -3157,14 +3245,14 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// rax: JSArray.
// rbx: Number of elements in array that remains to be filled, as int32.
// rcx: Start of elements in FixedArray.
- // rdx: the hole.
+ // rdx: undefined.
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
@@ -3349,13 +3437,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ jmp(&not_user_equal);
__ bind(&user_equal);
-
+
__ pop(rbx); // Return address.
__ push(rax);
__ push(rdx);
__ push(rbx);
__ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
+
__ bind(&not_user_equal);
}
@@ -4691,7 +4779,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -4717,9 +4805,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
__ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
@@ -5239,7 +5327,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -5283,7 +5371,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
@@ -5887,8 +5975,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r0);
@@ -6044,18 +6131,20 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+ // and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+ // FastNewClosureStub::Generate
+ { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -6100,6 +6189,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return true; // Always have SSE2 on x64.
+}
+
+
// Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -6232,6 +6326,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
+ __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ movq(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ subq(regs_.scratch1(), Immediate(1));
+ __ movq(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
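// A minimal C++ sketch of the counter check just emitted. It relies only
// on pages being power-of-two aligned: masking any object address with
// ~kPageAlignmentMask recovers the MemoryChunk header of its page, whose
// write-barrier counter rate-limits how often the (expensive) incremental
// marker is consulted. Treat field and constant names as illustrative.
static bool CounterSaysInformMarker(uintptr_t object_address) {
  uintptr_t chunk = object_address & ~Page::kPageAlignmentMask;
  intptr_t* counter = reinterpret_cast<intptr_t*>(
      chunk + MemoryChunk::kWriteBarrierCounterOffset);
  *counter -= 1;
  return *counter < 0;  // Negative: take the incremental-marking path.
}
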
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -6323,9 +6428,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(rdi, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+ __ CheckFastSmiElements(rdi, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -6343,7 +6448,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// place.
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6357,8 +6462,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
+ // FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6379,6 +6484,74 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Save volatile registers.
+ // Live registers at this point are the same as at the start of any
+ // JS function:
+ // o rdi: the JS function object being called (i.e. ourselves)
+ // o rsi: our context
+ // o rbp: our caller's frame pointer
+ // o rsp: stack pointer (pointing to return address)
+ // o rcx: zero for method calls and non-zero for function calls.
+#ifdef _WIN64
+ const int kNumSavedRegisters = 1;
+
+ __ push(rcx);
+#else
+ const int kNumSavedRegisters = 3;
+
+ __ push(rcx);
+ __ push(rdi);
+ __ push(rsi);
+#endif
+
+ // Calculate the original stack pointer and store it in the second arg.
+#ifdef _WIN64
+ __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#else
+ __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#endif
+
+ // Calculate the function address and store it in the first arg.
+#ifdef _WIN64
+ __ movq(rcx, Operand(rdx, 0));
+ __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
+#else
+ __ movq(rdi, Operand(rsi, 0));
+ __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
+#endif
+
+ // Call the entry hook function.
+ __ movq(rax, &entry_hook_, RelocInfo::NONE);
+ __ movq(rax, Operand(rax, 0));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+
+ const int kArgumentCount = 2;
+ __ PrepareCallCFunction(kArgumentCount);
+ __ CallCFunction(rax, kArgumentCount);
+
+ // Restore volatile regs.
+#ifdef _WIN64
+ __ pop(rcx);
+#else
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rcx);
+#endif
+
+ __ Ret();
+}
+
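// The hook invoked above receives two arguments: the entry point of the
// code object being entered and the stack slot holding its return
// address. A hypothetical hook matching the FunctionEntryHook type from
// v8.h (a sketch, not part of this patch):

#include <stdint.h>
#include <stdio.h>

typedef void (*FunctionEntryHook)(uintptr_t function,
                                  uintptr_t return_addr_location);

static void LogEntryHook(uintptr_t function, uintptr_t return_addr_location) {
  printf("entering code at 0x%lx (return address slot 0x%lx)\n",
         static_cast<unsigned long>(function),
         static_cast<unsigned long>(return_addr_location));
}
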
#undef __
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.cc b/src/3rdparty/v8/src/x64/codegen-x64.cc
index a8d39b2..ffccf47 100644
--- a/src/3rdparty/v8/src/x64/codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/codegen-x64.cc
@@ -220,7 +220,7 @@ ModuloFunction CreateModuloFunction() {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -241,7 +241,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
@@ -551,7 +551,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -577,6 +577,91 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
+
+static const int kNoCodeAgeSequenceLength = 6;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(rbp);
+ patcher.masm()->movq(rbp, rsp);
+ patcher.masm()->push(rsi);
+ patcher.masm()->push(rdi);
+ initialized = true;
+ }
+ return sequence;
+}
+
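// For reference, the boilerplate assembles to exactly
// kNoCodeAgeSequenceLength == 6 bytes of machine code:
//   55         push rbp
//   48 89 e5   movq rbp, rsp
//   56         push rsi
//   57         push rdi
// Aging overwrites these 6 bytes with a 5-byte rel32 call into an age
// stub plus one nop (see PatchPlatformCodeAge below).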
+
+byte* Code::FindPlatformCodeAgeSequence() {
+ byte* start = instruction_start();
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (!memcmp(start, young_sequence, young_length) ||
+ *start == kCallOpcode) {
+ return start;
+ } else {
+ byte* start_after_strict = NULL;
+ if (kind() == FUNCTION) {
+ start_after_strict = start + kSizeOfFullCodegenStrictModePrologue;
+ } else {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ start_after_strict = start + kSizeOfOptimizedStrictModePrologue;
+ }
+ ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
+ *start_after_strict == kCallOpcode);
+ return start_after_strict;
+ }
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
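// Decoding note for the aged branch above: after the one-byte 0xE8
// opcode, a call instruction carries a signed 32-bit displacement
// relative to the end of the instruction. With `sequence` advanced past
// the opcode, the absolute target is sequence + rel32 + 4, the trailing
// 4 being supplied by Assembler::kCallTargetAddressOffset (assuming the
// 32-bit rel form).
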
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length);
+ patcher.masm()->call(stub->instruction_start());
+ patcher.masm()->nop();
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.h b/src/3rdparty/v8/src/x64/codegen-x64.h
index 2e80751..5d8bbff 100644
--- a/src/3rdparty/v8/src/x64/codegen-x64.h
+++ b/src/3rdparty/v8/src/x64/codegen-x64.h
@@ -39,6 +39,8 @@ class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+static const int kSizeOfFullCodegenStrictModePrologue = 14;
+static const int kSizeOfOptimizedStrictModePrologue = 14;
// -------------------------------------------------------------------------
// CodeGenerator
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
index f3046b9..a3fe8f9 100644
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
@@ -52,6 +52,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
+ // The optimized code is going to be patched, so we cannot use it
+ // any more. Play it safe and reset the whole cache.
+ function->shared()->ClearOptimizedCodeMap();
+
// Get the optimized code.
Code* code = function->code();
@@ -100,8 +104,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Set the code for the function to non-optimized version.
- function->ReplaceCode(function->shared()->code());
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -188,11 +191,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
- if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@@ -214,7 +217,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);
- int bailout_id = LookupBailoutId(data, ast_id);
+ int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@@ -234,9 +237,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
- USE(function);
- ASSERT(function == function_);
+ int closure_id = iterator.Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@@ -341,15 +344,15 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(pc);
}
Code* continuation =
- function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
}
}
@@ -576,16 +579,143 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver (and the implicit return value, if any) are expected in
+ // registers by the LoadIC/StoreIC, so they don't belong to the output stack
+ // frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (FLAG_trace_deopt) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
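// Resulting frame layout, highest address first (a sketch; offsets are
// relative to the frame's top address):
//   [size - 1 * kPointerSize]  caller's pc
//   [size - 2 * kPointerSize]  caller's fp   <- this frame's fp
//   [size - 3 * kPointerSize]  context
//   [size - 4 * kPointerSize]  StackFrame::INTERNAL marker (Smi)
//   [size - 5 * kPointerSize]  accessor stub code object
//   [0]                        implicit return value (setter frames only)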
+
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
- int node_id = iterator->Next();
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ BailoutId node_id = BailoutId(iterator->Next());
+ JSFunction* function;
+ if (frame_index != 0) {
+ function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ } else {
+ int closure_id = iterator->Next();
+ USE(closure_id);
+ ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+ function = function_;
+ }
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
diff --git a/src/3rdparty/v8/src/x64/disasm-x64.cc b/src/3rdparty/v8/src/x64/disasm-x64.cc
index 0738153..c8606c4 100644
--- a/src/3rdparty/v8/src/x64/disasm-x64.cc
+++ b/src/3rdparty/v8/src/x64/disasm-x64.cc
@@ -703,6 +703,9 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
case 4:
mnem = "mul";
break;
+ case 5:
+ mnem = "imul";
+ break;
case 7:
mnem = "idiv";
break;
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
index 22f2ebb..a71c9b1 100644
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
@@ -123,6 +123,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -136,6 +138,8 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
+ Label begin;
+ __ bind(&begin);
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
@@ -143,6 +147,8 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
+ ASSERT(!FLAG_age_code ||
+ (kSizeOfFullCodegenStrictModePrologue == ok.pos() - begin.pos()));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -173,11 +179,14 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
(scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate local context");
+ Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -253,7 +262,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
- PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@@ -268,7 +277,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
@@ -311,10 +320,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
// Self-optimization is a one-off thing; if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = 10;
- }
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
@@ -324,10 +329,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 162;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -339,7 +340,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -395,7 +396,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance = kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
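
// Worked example, assuming the renamed constants keep the values removed
// above (kMaxBackEdgeWeight == 127, a distance unit of 162 bytes): a back
// edge 1620 bytes from its target gets
//   weight = Min(127, Max(1, 1620 / 162)) == 10
// so every pass around that loop drops the profiling counter by 10.
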
@@ -509,12 +510,20 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ Move(result_register(), lit);
+ if (lit->IsSmi()) {
+ __ SafeMove(result_register(), Smi::cast(*lit));
+ } else {
+ __ Move(result_register(), lit);
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- __ Push(lit);
+ if (lit->IsSmi()) {
+ __ SafePush(Smi::cast(*lit));
+ } else {
+ __ Push(lit);
+ }
}
@@ -660,7 +669,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ push(result_register());
- __ CallStub(&stub);
+ __ CallStub(&stub, condition->test_id());
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -758,7 +767,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
@@ -780,11 +789,12 @@ void FullCodeGenerator::VisitVariableDeclaration(
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ : isolate()->factory()->undefined_value(),
+ zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
case Variable::PARAMETER:
@@ -812,10 +822,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
+ ASSERT(IsDeclaredVariableMode(mode));
PropertyAttributes attr =
- (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -839,13 +848,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
+ globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(function, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
break;
}
@@ -897,9 +906,9 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()));
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
Visit(declaration->module());
break;
}
@@ -1103,22 +1112,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array, Label::kNear);
+ __ j(not_equal, &fixed_array);
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
+
+ Label no_descriptors;
+
+ __ EnumLength(rdx, rax);
+ __ Cmp(rdx, Smi::FromInt(0));
+ __ j(equal, &no_descriptors);
+
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(rax); // Map.
- __ push(rdx); // Enumeration cache.
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ push(rax); // Enumeration cache length (as smi).
+ __ push(rcx); // Enumeration cache.
+ __ push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
+ __ bind(&no_descriptors);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&exit);
+
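// For-in loop state as laid out on the stack by the sequence above
// (a sketch):
//   [rsp + 3 * kPointerSize]  map of the object being enumerated
//   [rsp + 2 * kPointerSize]  enumeration cache (array of key names)
//   [rsp + 1 * kPointerSize]  number of valid entries (Smi)
//   [rsp + 0]                 current index (Smi)
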
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@@ -1127,7 +1146,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
- RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(rbx, cell);
__ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
@@ -1286,9 +1305,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ movq(temp, context);
}
// Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
- // Terminate at global context.
+ // Terminate at native context.
__ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
@@ -1571,9 +1590,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
- expr->CalculateEmitStore();
+ expr->CalculateEmitStore(zone());
- AccessorTable accessor_table(isolate()->zone());
+ AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1599,7 +1618,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1663,7 +1682,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+ bool has_constant_fast_elements =
+ IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1674,7 +1694,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@@ -1686,10 +1706,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- ASSERT(constant_elements_kind == FAST_ELEMENTS ||
- constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+ // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1717,9 +1736,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
- if (constant_elements_kind == FAST_ELEMENTS) {
- // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
- // transition and don't need to call the runtime stub.
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
+ // cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -1810,11 +1829,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@@ -1869,14 +1888,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@@ -1898,7 +1917,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1947,7 +1967,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -2073,7 +2094,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ movq(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2105,37 +2126,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ push(Operand(rsp, kPointerSize)); // Receiver is now under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0));
- } else {
- __ pop(rdx);
- }
+ __ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- __ Drop(1);
- }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -2144,38 +2143,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
__ pop(rcx);
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on the stack for later.
- } else {
- __ pop(rdx);
- }
+ __ pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ pop(rdx);
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(rdx);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- }
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2189,6 +2164,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(rax);
} else {
VisitForStackValue(expr->obj());
@@ -2202,7 +2178,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
ic_total_count_++;
__ call(code, rmode, ast_id);
}
@@ -2225,7 +2201,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2258,7 +2234,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2278,20 +2254,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
- // Record call targets in unoptimized code, but not in the snapshot.
- if (!Serializer::enabled()) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ Move(rbx, cell);
- }
+ // Record call targets in unoptimized code.
+ flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ Move(rbx, cell);
CallFunctionStub stub(arg_count, flags);
__ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2469,20 +2443,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- CallFunctionFlags flags;
- if (!Serializer::enabled()) {
- flags = RECORD_CALL_TARGET;
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->id(), cell);
- __ Move(rbx, cell);
- } else {
- flags = NO_CALL_FUNCTION_FLAGS;
- }
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ Move(rbx, cell);
- CallConstructStub stub(flags);
+ CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
@@ -2623,7 +2591,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(rax);
+ __ AssertNotSmi(rax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2639,45 +2607,50 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ j(equal, if_false);
// Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(rcx, rbx);
+ __ cmpq(rcx, Immediate(0));
+ __ j(equal, &done);
+
__ LoadInstanceDescriptors(rbx, rbx);
- __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- // rbx: descriptor array
- // rcx: length of descriptor array
+ // rbx: descriptor array.
+ // rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
+ __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+ rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(rbx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf, the result is false.
- Label entry, loop;
__ jmp(&entry);
__ bind(&loop);
__ movq(rdx, FieldOperand(rbx, 0));
__ Cmp(rdx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ addq(rbx, Immediate(kPointerSize));
+ __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmpq(rbx, rcx);
__ j(not_equal, &loop);
+ __ bind(&done);
// Reload map as register rbx was used as temporary above.
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // If a valueOf property is not found on the object check that it's
+ // If a valueOf property is not found on the object check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ testq(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ j(not_equal, if_false);
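
For reference, the rewritten scan walks only the map's own descriptors, stepping kDescriptorSize slots per entry. An equivalent walk in plain C++, where the layout constant is an assumed stand-in and only the loop structure mirrors the generated code:

#include <string>
#include <vector>

// Each descriptor occupies kDescriptorSize consecutive slots and its
// first slot holds the key; kDescriptorSize = 3 is an assumption for
// illustration (key, details, value).
constexpr int kDescriptorSize = 3;

bool HasValueOfDescriptor(const std::vector<std::string>& slots,
                          int number_of_own_descriptors) {
  // Matches the NumberOfOwnDescriptors() == 0 fast path that skips the loop.
  if (number_of_own_descriptors == 0) return false;
  const int end = number_of_own_descriptors * kDescriptorSize;
  for (int i = 0; i < end; i += kDescriptorSize) {
    if (slots[i] == "valueOf") return true;  // Key match: map is unsafe.
  }
  return false;
}
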
@@ -2847,7 +2820,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(rax);
+ __ AssertSmi(rax);
context()->Plug(rax);
}
@@ -2954,12 +2927,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
__ PrepareCallCFunction(1);
#ifdef _WIN64
- __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
#else
- __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
#endif
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
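
The #ifdef is needed because the first integer argument lives in a different register on the two x64 ABIs: rcx on Win64 (then rdx, r8, r9) and rdi on System V (then rsi, rdx, rcx, r8, r9). Compiled C++ never sees the difference, as in this sketch, where random_uint32 is a local stand-in for the external reference:

#include <cstdint>

// Stand-in for ExternalReference::random_uint32_function; the xorshift
// body is purely illustrative.
extern "C" uint32_t random_uint32(void* /*isolate*/) {
  static uint32_t state = 2463534242u;
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

uint32_t NextRandom(void* isolate) {
  // The compiler emits the ABI-appropriate argument move; generated
  // assembly has to do it by hand, hence the two branches above.
  return random_uint32(isolate);
}
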
@@ -3033,19 +3008,18 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
- Label runtime, done;
+ Label runtime, done, not_date_object;
Register object = rax;
Register result = rax;
Register scratch = rcx;
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ __ JumpIfSmi(object, &not_date_object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ __ j(not_equal, &not_date_object);
if (index->value() == 0) {
__ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@@ -3067,8 +3041,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
#endif
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
+ __ jmp(&done);
}
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
context()->Plug(rax);
}
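
The fast path above depends on the date-cache stamp: cached fields are trusted only while the object's stamp matches the isolate-wide one. A sketch of that rule under an assumed layout, not JSDate's real one:

#include <ctime>

// Assumed model: the isolate-wide stamp is bumped whenever time zone or
// DST data may have changed, invalidating every cached field at once.
struct DateCache { int stamp = 1; };

struct JSDateModel {
  int stamp = 0;        // Stamp at the time the fields were cached.
  int cached_year = 0;  // One representative cached field.
};

int GetYear(JSDateModel* date, DateCache* cache, double ms_since_epoch) {
  if (date->stamp == cache->stamp) {
    return date->cached_year;  // Fast path: cache is still valid.
  }
  // Stands in for the runtime call that recomputes and re-stamps.
  std::time_t secs = static_cast<std::time_t>(ms_since_epoch / 1000.0);
  std::tm* parts = std::gmtime(&secs);
  date->cached_year = 1900 + parts->tm_year;
  date->stamp = cache->stamp;
  return date->cached_year;
}
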
@@ -3333,10 +3311,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
- // Check for proxy.
- Label proxy, done;
- __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
- __ j(equal, &proxy);
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
__ movq(rdi, result_register());
@@ -3346,7 +3325,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
- __ bind(&proxy);
+ __ bind(&runtime);
__ push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3375,7 +3354,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- isolate()->global_context()->jsfunction_result_caches());
+ isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3388,9 +3367,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movq(cache,
- FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+ FieldOperand(cache, GlobalObject::kNativeContextOffset));
__ movq(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ movq(cache,
@@ -3491,9 +3470,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
- __ AbortIfNotString(rax);
- }
+ __ AssertString(rax);
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -3571,7 +3548,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Loop condition: while (index < array_length).
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmpq(index, array_length);
__ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@@ -3585,7 +3562,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
@@ -3624,7 +3601,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
// Live registers:
@@ -3817,7 +3794,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->id());
+ CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3975,7 +3952,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
context()->Plug(rax);
}
@@ -4031,7 +4009,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
- PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@@ -4096,7 +4074,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4130,7 +4108,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4147,7 +4125,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4354,7 +4332,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4436,7 +4414,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
- // Contexts nested in the global context have a canonical empty function
+ // Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@@ -4466,15 +4444,52 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ push(rdx);
+
// Store result register while executing finally block.
__ push(result_register());
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Load(rdx, pending_message_obj);
+ __ push(rdx);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Load(rdx, has_pending_message);
+ __ Integer32ToSmi(rdx, rdx);
+ __ push(rdx);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Load(rdx, pending_message_script);
+ __ push(rdx);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
+ // Restore pending message from stack.
+ __ pop(rdx);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Store(pending_message_script, rdx);
+
+ __ pop(rdx);
+ __ SmiToInteger32(rdx, rdx);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Store(has_pending_message, rdx);
+
+ __ pop(rdx);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Store(pending_message_obj, rdx);
+
+ // Restore result register from stack.
__ pop(result_register());
+
// Uncook return address.
__ pop(rdx);
__ SmiToInteger32(rdx, rdx);
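
The new pushes and pops preserve three pieces of pending-message state across the finally body, so a message pending before the block is still pending after it. The same contract expressed as an RAII sketch:

// Sketch of the save/restore contract of EnterFinallyBlock and
// ExitFinallyBlock; the generated code does this on the machine stack,
// popping in the reverse of the push order.
struct PendingMessageState {
  const void* message_obj = nullptr;
  bool has_pending_message = false;
  const void* message_script = nullptr;
};

class FinallyScope {
 public:
  explicit FinallyScope(PendingMessageState* state)
      : state_(state), saved_(*state) {}   // EnterFinallyBlock.
  ~FinallyScope() { *state_ = saved_; }    // ExitFinallyBlock.

 private:
  PendingMessageState* state_;
  PendingMessageState saved_;
};
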
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
index 6ba5fb6..efa07a8 100644
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ b/src/3rdparty/v8/src/x64/ic-x64.cc
@@ -135,7 +135,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
r0,
r1);
- // If probing finds an entry in the dictionary, r0 contains the
+ // If probing finds an entry in the dictionary, r1 contains the
// index into the dictionary. Check that the value is a normal
// property.
__ bind(&done);
@@ -178,10 +178,9 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
//
// value - holds the value to store and is unchanged.
//
- // scratch0 - used for index into the property dictionary and is clobbered.
+ // scratch0 - used during the positive dictionary lookup and is clobbered.
//
- // scratch1 - used to hold the capacity of the property dictionary and is
- // clobbered.
+ // scratch1 - used for index into the property dictionary and is clobbered.
Label done;
// Probe the dictionary.
@@ -624,6 +623,123 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ // Fast case: Do the store; the value could be either an Object or a double.
+ __ bind(fast_object);
+ // rax: value
+ // rbx: receiver's elements array (a FixedArray)
+ // rcx: index
+ // rdx: receiver (a JSArray)
+ // r9: map of receiver
+ if (check_map == kCheckMap) {
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, fast_double);
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(rax, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ // It's irrelevant whether the array is smi-only when writing a smi.
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Writing a non-smi, check whether array allows non-smi elements.
+ // r9: receiver's map
+ __ CheckFastObjectElements(r9, &transition_smi_elements);
+
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ movq(rdx, rax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, slow);
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+ // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -632,11 +748,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
+ Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
@@ -667,7 +781,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rbx: FixedArray
// rcx: index
- __ j(above, &fast_object_with_map_check);
+ __ j(above, &fast_object);
// Slow case: call runtime.
__ bind(&slow);
@@ -691,18 +805,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Increment index to get new length.
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_extra_double);
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_object_without_map_check);
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
- __ bind(&check_extra_double);
+ __ bind(&check_if_double_array);
// rdi: elements array's map
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, &slow);
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -718,92 +828,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra);
- // Fast case: Do the store.
- __ bind(&fast_object_with_map_check);
- // rax: value
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // rdx: receiver (a JSArray)
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
- __ bind(&finish_object_store);
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ bind(&fast_double_without_map_check);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
- &transition_double_elements);
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
}
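
The two calls above stamp four entry points out of one body: in-bounds stores still check the elements map, while the grow path has already dispatched on it and bumps the length instead. The same shape in plain C++; the enum definitions and ArrayModel are simplified stand-ins:

#include <vector>

// The enum values mirror the ones used above; their definitions here
// are stand-ins, and ArrayModel is a deliberately simplified JSArray.
enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };

struct ArrayModel {
  std::vector<double> elements;
  int length = 0;
};

void StoreHelper(ArrayModel* a, int index, double value,
                 KeyedStoreCheckMap check_map,
                 KeyedStoreIncrementLength increment_length) {
  if (check_map == kCheckMap) {
    // In-bounds entry: the generated helper dispatches here on the
    // elements map (FixedArray vs. FixedDoubleArray).
  }
  if (increment_length == kIncrementLength) {
    a->elements.resize(index + 1);  // Grow-by-one store: make room and
    a->length = index + 1;          // bump the JSArray length first.
  }
  a->elements[index] = value;
}
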
@@ -823,7 +851,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
- NORMAL,
+ Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
rax);
@@ -1642,7 +1670,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ movq(rax, rdx);
__ Ret();
__ bind(&fail);
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
index d34a520..a948ccc 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
@@ -92,17 +92,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LChunkBuilder::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -128,6 +119,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -140,6 +133,8 @@ bool LCodeGen::GeneratePrologue() {
// object). rcx is zero for method calls and non-zero for function
// calls.
if (!info_->is_classic_mode() || info_->is_native()) {
+ Label begin;
+ __ bind(&begin);
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -148,6 +143,8 @@ bool LCodeGen::GeneratePrologue() {
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
+ ASSERT(!FLAG_age_code ||
+ (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos()));
}
__ push(rbp); // Caller's frame pointer.
@@ -321,24 +318,24 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
- value->Number());
- return static_cast<int32_t>(value->Number());
+ ASSERT(constant->HasInteger32Value());
+ return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
- Handle<Object> value = chunk_->LookupLiteral(op);
- return value->Number();
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- Handle<Object> literal = chunk_->LookupLiteral(op);
+ HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return literal;
+ return constant->handle();
}
@@ -359,7 +356,9 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -367,8 +366,21 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- WriteTranslation(environment->outer(), translation);
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ // Function parameters are arguments to the outermost environment. The
+ // arguments index points to the first element of a sequence of tagged
+ // values on the stack that represent the arguments. This needs to be
+ // kept in sync with the LArgumentsElements implementation.
+ *arguments_index = -environment->parameter_count();
+ *arguments_count = environment->parameter_count();
+
+ WriteTranslation(environment->outer(),
+ translation,
+ arguments_index,
+ arguments_count);
+ int closure_id = *info()->closure() != *environment->closure()
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -376,12 +388,31 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
- default:
- UNREACHABLE();
}
+
+ // Inlined frames which push their arguments cause the index to be
+ // bumped and a new stack area to be used for materialization.
+ if (environment->entry() != NULL &&
+ environment->entry()->arguments_pushed()) {
+ *arguments_index = *arguments_index < 0
+ ? GetStackSlotCount()
+ : *arguments_index + *arguments_count;
+ *arguments_count = environment->entry()->arguments_count() + 1;
+ }
+
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -392,7 +423,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i));
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -400,26 +434,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
- false);
+ false,
+ false,
+ *arguments_index,
+ *arguments_count);
}
}
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged) {
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject();
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@@ -433,6 +480,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@@ -440,8 +489,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(literal);
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -518,20 +567,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
+ int args_index = 0;
+ int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count);
- WriteTranslation(environment, &translation);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment);
+ deoptimizations_.Add(environment, environment->zone());
}
}
@@ -553,7 +604,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
// jump entry if this is the case.
if (jump_table_.is_empty() ||
jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry));
+ jump_table_.Add(JumpTableEntry(entry), zone());
}
__ j(cc, &jump_table_.last().label);
}
@@ -577,13 +628,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@@ -598,7 +649,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
- deoptimization_literals_.Add(literal);
+ deoptimization_literals_.Add(literal, zone());
return result;
}
@@ -645,14 +696,14 @@ void LCodeGen::RecordSafepoint(
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
+ safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi);
+ safepoint.DefinePointerRegister(rsi, zone());
}
}
@@ -664,7 +715,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
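
The zone() argument threaded through Add, DefinePointerSlot, and the LPointerMap constructor is arena allocation: per-compilation structures allocate from a zone and are released wholesale when compilation ends. A minimal bump-pointer sketch, not V8's Zone:

#include <cstddef>

// Minimal single-block zone for illustration; the real Zone grows by
// chaining segments instead of failing when the block is full.
class Zone {
 public:
  explicit Zone(size_t capacity)
      : block_(new char[capacity]), top_(block_), end_(block_ + capacity) {}
  ~Zone() { delete[] block_; }  // Everything dies with the zone.

  void* New(size_t size) {
    size = (size + 7) & ~static_cast<size_t>(7);  // 8-byte alignment.
    if (top_ + size > end_) return nullptr;
    void* result = top_;
    top_ += size;
    return result;
  }

 private:
  char* block_;
  char* top_;
  char* end_;
};
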
@@ -772,7 +823,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
+ Register dividend = ToRegister(instr->left());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -796,8 +847,8 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&done);
} else {
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->InputAt(0));
- Register right_reg = ToRegister(instr->InputAt(1));
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
ASSERT(left_reg.is(rax));
@@ -827,7 +878,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(less, &remainder_eq_dividend, Label::kNear);
// Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ movl(scratch, right_reg);
__ subl(scratch, Immediate(1));
__ testl(scratch, right_reg);
@@ -883,12 +934,95 @@ void LCodeGen::DoModI(LModI* instr) {
}
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+ ASSERT(instr->right()->IsConstantOperand());
+
+ const Register dividend = ToRegister(instr->left());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ const Register result = ToRegister(instr->result());
+
+ switch (divisor) {
+ case 0:
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+
+ case 1:
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ return;
+
+ case -1:
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ __ negl(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ return;
+ }
+
+ uint32_t divisor_abs = abs(divisor);
+ if (IsPowerOf2(divisor_abs)) {
+ int32_t power = WhichPowerOf2(divisor_abs);
+ if (divisor < 0) {
+ __ movsxlq(result, dividend);
+ __ neg(result);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ sar(result, Immediate(power));
+ } else {
+ if (!result.is(dividend)) {
+ __ movl(result, dividend);
+ }
+ __ sarl(result, Immediate(power));
+ }
+ } else {
+ Register reg1 = ToRegister(instr->temp());
+ Register reg2 = ToRegister(instr->result());
+
+ // Find b such that 2^b < divisor_abs < 2^(b+1).
+ unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
+ unsigned shift = 32 + b; // Effectively one extra bit of precision.
+ double multiplier_f =
+ static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
+ int64_t multiplier;
+ if (multiplier_f - floor(multiplier_f) < 0.5) {
+ multiplier = static_cast<int64_t>(floor(multiplier_f));
+ } else {
+ multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
+ }
+ // The multiplier is a uint32.
+ ASSERT(multiplier > 0 &&
+ multiplier < (static_cast<int64_t>(1) << 32));
+ // The multiplication is 64-bit, so sign-extend the dividend to 64 bits.
+ __ movsxlq(reg1, dividend);
+ if (divisor < 0 &&
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ neg(reg1);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ __ movq(reg2, multiplier, RelocInfo::NONE);
+ // The result just fits in 64 bits, because it is int32 * uint32.
+ __ imul(reg2, reg1);
+
+ __ addq(reg2, Immediate(1 << 30));
+ __ sar(reg2, Immediate(shift));
+ }
+}
+
+
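
The general case above replaces a floor division by a constant with one 64-bit multiply and an arithmetic shift: pick b with 2^b < |d| < 2^(b+1), round 2^(32+b)/|d| to the nearest 32-bit multiplier, and bias the product by 2^30 before shifting. A self-checking sketch of the same arithmetic (the divisor must not be 0, plus or minus 1, or a power of two, matching the branch above):

#include <cassert>
#include <cmath>
#include <cstdint>

int32_t FloorDivByConstant(int32_t x, int32_t divisor) {
  uint32_t divisor_abs = divisor < 0 ? -static_cast<uint32_t>(divisor)
                                     : static_cast<uint32_t>(divisor);
  unsigned b = 0;                          // b = floor(log2(|d|)).
  while ((divisor_abs >> (b + 1)) != 0) ++b;
  unsigned shift = 32 + b;                 // One extra bit of precision.
  double multiplier_f =
      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
  int64_t multiplier = static_cast<int64_t>(std::floor(multiplier_f));
  if (multiplier_f - std::floor(multiplier_f) >= 0.5) multiplier += 1;
  int64_t dividend = x;
  if (divisor < 0) dividend = -dividend;   // x/d == (-x)/|d| exactly.
  int64_t product = dividend * multiplier; // int32 * uint32 fits in 64 bits.
  // Arithmetic right shift of a negative value, matching the emitted sar.
  return static_cast<int32_t>((product + (int64_t{1} << 30)) >> shift);
}

int main() {
  for (int32_t x = -1000; x <= 1000; ++x) {
    assert(FloorDivByConstant(x, 7) == static_cast<int32_t>(std::floor(x / 7.0)));
    assert(FloorDivByConstant(x, -7) == static_cast<int32_t>(std::floor(x / -7.0)));
  }
  return 0;
}
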
void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->InputAt(1);
+ LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+ ASSERT(ToRegister(instr->left()).is(rax));
+ ASSERT(!ToRegister(instr->right()).is(rax));
+ ASSERT(!ToRegister(instr->right()).is(rdx));
Register left_reg = rax;
@@ -930,8 +1064,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movl(kScratchRegister, left);
@@ -996,8 +1130,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ testl(left, left);
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
+ if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
+ } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+ __ cmpl(kScratchRegister, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
__ orl(kScratchRegister, ToOperand(right));
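
The tightened constant case encodes when a zero-valued int32 multiply must bail out to produce -0: only when the IEEE sign of the product would be negative. As a predicate:

#include <cstdint>

// A zero-valued product needs the -0 bailout exactly when one operand is
// zero and the other is negative; for a constant right operand this is
// why right < 0 deoptimizes outright while right == 0 only deoptimizes
// if the saved left operand was negative.
bool NeedsMinusZeroBailout(int32_t left, int32_t right) {
  return (left == 0 && right < 0) || (left < 0 && right == 0);
}
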
@@ -1013,8 +1150,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
@@ -1070,14 +1207,17 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
ASSERT(ToRegister(right).is(rcx));
switch (instr->op()) {
+ case Token::ROR:
+ __ rorl_cl(ToRegister(left));
+ break;
case Token::SAR:
__ sarl_cl(ToRegister(left));
break;
@@ -1099,6 +1239,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rorl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sarl(ToRegister(left), Immediate(shift_count));
@@ -1126,8 +1271,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1161,7 +1306,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (int_val == 0) {
__ xorps(res, res);
} else {
- Register tmp = ToRegister(instr->TempAt(0));
+ Register tmp = ToRegister(instr->temp());
__ Set(tmp, int_val);
__ movq(res, tmp);
}
@@ -1181,21 +1326,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ movq(result, FieldOperand(input, HeapObject::kMapOffset));
@@ -1208,7 +1360,7 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Label done;
@@ -1225,18 +1377,17 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
Smi* index = instr->index();
- Label runtime, done;
+ Label runtime, done, not_date_object;
ASSERT(object.is(result));
ASSERT(object.is(rax));
-#ifdef DEBUG
- __ AbortIfSmi(object);
+ Condition cc = masm()->CheckSmi(object);
+ DeoptimizeIf(cc, instr->environment());
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- __ Assert(equal, "Trying to get date field from non-date.");
-#endif
+ DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
__ movq(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1268,14 +1419,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->value()));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1286,8 +1437,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1305,9 +1456,75 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ ASSERT(left->Equals(instr->result()));
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Label return_left;
+ Condition condition = (operation == HMathMinMax::kMathMin)
+ ? less_equal
+ : greater_equal;
+ Register left_reg = ToRegister(left);
+ if (right->IsConstantOperand()) {
+ Immediate right_imm =
+ Immediate(ToInteger32(LConstantOperand::cast(right)));
+ __ cmpq(left_reg, right_imm);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_imm);
+ } else if (right->IsRegister()) {
+ Register right_reg = ToRegister(right);
+ __ cmpq(left_reg, right_reg);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_reg);
+ } else {
+ Operand right_op = ToOperand(right);
+ __ cmpq(left_reg, right_op);
+ __ j(condition, &return_left, Label::kNear);
+ __ movq(left_reg, right_op);
+ }
+ __ bind(&return_left);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+ XMMRegister left_reg = ToDoubleRegister(left);
+ XMMRegister right_reg = ToDoubleRegister(right);
+ __ ucomisd(left_reg, right_reg);
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(left_reg, xmm_scratch);
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+ if (operation == HMathMinMax::kMathMin) {
+ __ orpd(left_reg, right_reg);
+ } else {
+ // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+ __ addsd(left_reg, right_reg);
+ }
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ ucomisd(left_reg, left_reg); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear);
+ __ bind(&return_right);
+ __ movsd(left_reg, right_reg);
+
+ __ bind(&return_left);
+ }
+}
+
+
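
The zero and NaN special cases are required because ucomisd reports +0 and -0 as equal and NaN comparisons as unordered. Over signed zeros, OR-ing the bit patterns yields the minimum and addition yields the maximum, which is what the orpd and addsd instructions above exploit; a sketch:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumes both inputs are +/-0, as at the check_zero label above.
// OR-ing the bit patterns mirrors the orpd instruction: the result is
// -0.0 iff either input is -0.0, which is exactly min over signed zeros.
double MinOfZeros(double a, double b) {
  uint64_t ab, bb;
  std::memcpy(&ab, &a, sizeof ab);
  std::memcpy(&bb, &b, sizeof bb);
  const uint64_t r = ab | bb;
  double out;
  std::memcpy(&out, &r, sizeof out);
  return out;
}

int main() {
  const double pz = 0.0, nz = -0.0;
  std::printf("min(+0,-0): signbit=%d\n", std::signbit(MinOfZeros(pz, nz)));
  // The addsd case: (+0) + (-0) == +0 and (-0) + (-0) == -0, which is
  // exactly max over signed zeros.
  std::printf("max(+0,-0): signbit=%d\n", std::signbit(pz + nz));
  return 0;
}
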
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->InputAt(0));
- XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
// All operations except MOD are computed in-place.
ASSERT(instr->op() == Token::MOD || left.is(result));
@@ -1341,8 +1558,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
- ASSERT(ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1386,17 +1603,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
__ testl(reg, reg);
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
@@ -1533,8 +1750,8 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -1581,8 +1798,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1592,7 +1809,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1602,7 +1819,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -1632,7 +1849,7 @@ void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -1667,7 +1884,7 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1691,8 +1908,8 @@ Condition LCodeGen::EmitIsString(Register input,
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1709,11 +1926,11 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Condition is_smi;
- if (instr->InputAt(0)->IsRegister()) {
- Register input = ToRegister(instr->InputAt(0));
+ if (instr->value()->IsRegister()) {
+ Register input = ToRegister(instr->value());
is_smi = masm()->CheckSmi(input);
} else {
- Operand input = ToOperand(instr->InputAt(0));
+ Operand input = ToOperand(instr->value());
is_smi = masm()->CheckSmi(input);
}
EmitBranch(true_block, false_block, is_smi);
@@ -1721,8 +1938,8 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1771,7 +1988,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1786,12 +2003,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- if (FLAG_debug_code) {
- __ AbortIfNotString(input);
- }
+ __ AssertString(input);
__ movl(result, FieldOperand(input, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -1801,7 +2016,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1881,9 +2096,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1899,7 +2114,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -1910,8 +2125,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testq(rax, rax);
@@ -1942,10 +2157,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->value());
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -1955,7 +2170,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// instanceof stub.
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<JSGlobalPropertyCell> cache_cell =
@@ -1998,7 +2213,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(flags);
- __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->value()));
__ PushHeapObject(instr->function());
static const int kAdditionalDelta = 10;
@@ -2098,7 +2313,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We have a temp because CompareRoot might clobber kScratchRegister.
- Register cell = ToRegister(instr->TempAt(0));
+ Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
__ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
@@ -2166,7 +2381,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ RecordWriteContextSlot(context,
offset,
value,
@@ -2181,7 +2396,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
@@ -2195,12 +2410,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name) {
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
- type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() &&
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
- if (lookup.type() == FIELD) {
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@@ -2212,13 +2427,43 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else {
+ } else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Handle<Map>(current->map()));
+ DeoptimizeIf(not_equal, env);
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
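
A note on the new negative-lookup branch above: the property is known to be
absent, so the emitted code walks the receiver's prototype chain, deopts if
any prototype's map has changed, and produces undefined. A rough host-side
sketch of that walk (plain C++ with illustrative types, not the V8 API):

    struct Map;
    struct HeapObject { const Map* map; };
    struct Map { HeapObject* prototype; };

    // Returns false where the generated code would deoptimize.
    bool NegativeLookup(HeapObject* receiver_prototype, HeapObject* null_value,
                        bool (*map_unchanged)(HeapObject*)) {
      for (HeapObject* cur = receiver_prototype; cur != null_value;
           cur = cur->map->prototype) {
        if (!map_unchanged(cur)) return false;  // DeoptimizeIf(not_equal, env)
      }
      return true;  // __ LoadRoot(result, kUndefinedValueRootIndex)
    }
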
+// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
+// prototype chain, which causes unbounded code generation.
+static bool CompactEmit(SmallMapList* list,
+ Handle<String> name,
+ int i,
+ Isolate* isolate) {
+ Handle<Map> map = list->at(i);
+ // If the map has ElementsKind transitions, we will generate map checks
+ // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
+ if (map->HasElementsTransition()) return false;
+ LookupResult lookup(isolate);
+ map->LookupDescriptor(NULL, *name, &lookup);
+ return lookup.IsField() || lookup.IsConstantFunction();
+}
+
+
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
@@ -2232,18 +2477,32 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
}
Handle<String> name = instr->hydrogen()->name();
Label done;
+ bool all_are_compact = true;
+ for (int i = 0; i < map_count; ++i) {
+ if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
+ all_are_compact = false;
+ break;
+ }
+ }
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
- __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ Label check_passed;
+ __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
- EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
} else {
Label next;
- __ j(not_equal, &next, Label::kNear);
- EmitLoadFieldOrConstantFunction(result, object, map, name);
- __ jmp(&done, Label::kNear);
+ bool compact = all_are_compact ||
+ CompactEmit(instr->hydrogen()->types(), name, i, isolate());
+ __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
}
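
The loop above emits one map compare per receiver map and, when every case
is compact, keeps the inter-case jumps as near jumps. A rough sketch of the
dispatch shape in plain C++ (illustrative names, not the V8 API):

    #include <vector>

    struct Map { int id; };
    struct Object { const Map* map; int field; };

    // Returns false where the generated code would deoptimize or defer to
    // the generic IC stub (when need_generic is set).
    bool PolymorphicLoad(const Object& obj, const std::vector<const Map*>& maps,
                         bool need_generic, int* out) {
      for (size_t i = 0; i < maps.size(); ++i) {
        bool last = (i + 1 == maps.size());
        if (obj.map == maps[i]) {  // __ CompareMap; __ bind(&check_passed)
          *out = obj.field;        // EmitLoadFieldOrConstantFunction
          return true;             // __ jmp(&done)
        }
        if (last && !need_generic) return false;  // DeoptimizeIf(not_equal)
        // __ j(not_equal, &next): fall through to the next map check.
      }
      return false;                // generic load handles the rest.
    }
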
@@ -2309,7 +2568,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, ok, fail;
@@ -2325,8 +2584,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Immediate(Map::kElementsKindMask));
__ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(FAST_ELEMENTS));
- __ j(equal, &ok, Label::kNear);
+ __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
+ __ j(less, &fail, Label::kNear);
+ __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
+ __ j(less_equal, &ok, Label::kNear);
__ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ j(less, &fail, Label::kNear);
__ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2343,7 +2604,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ movq(result, FieldOperand(input,
ExternalPixelArray::kExternalPointerOffset));
}
@@ -2353,118 +2614,47 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
Register result = ToRegister(instr->result());
-
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
if (instr->index()->IsRegister()) {
__ subl(length, ToRegister(instr->index()));
} else {
__ subl(length, ToOperand(instr->index()));
}
- DeoptimizeIf(below_equal, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
__ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits.
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
- XMMRegister result(ToDoubleRegister(instr->result()));
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movsd(result, double_load_operand);
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
+template <class T>
+inline void LCodeGen::PrepareKeyForKeyedOp(T* hydrogen_instr, LOperand* key) {
+ if (ArrayOpClobbersKey<T>(hydrogen_instr)) {
+ // Even though the HLoadKeyed/HStoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ Register key_reg = ToRegister(key);
+ if (hydrogen_instr->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (hydrogen_instr->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
}
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
- } else {
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
}
}
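
A scalar restatement of the two key fixups above, assuming x64's Smi
encoding (payload in the upper 32 bits of the word) and a possibly-negative
32-bit untagged key; illustrative only, not the V8 API:

    #include <cstdint>

    int64_t PrepareKey(int64_t key, bool key_is_tagged, bool dehoisted) {
      if (key_is_tagged) {
        return key >> 32;              // SmiToInteger64: strip the tag.
      } else if (dehoisted) {
        return (int64_t)(int32_t)key;  // movsxlq: sign-extend 32 -> 64 bits.
      }
      return key;
    }
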
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp(instr->hydrogen(), key);
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
@@ -2493,17 +2683,19 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(result, operand);
- __ testl(result, result);
- // TODO(danno): we could be more clever here, perhaps having a special
- // version of the stub that detects if the overflow case actually
- // happens, and generate code that returns a double rather than int.
- DeoptimizeIf(negative, instr->environment());
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ testl(result, result);
+ DeoptimizeIf(negative, instr->environment());
+ }
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
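
For EXTERNAL_UNSIGNED_INT_ELEMENTS, the hunk above keeps the raw 32-bit
value when the instruction carries the kUint32 flag instead of deopting
whenever the sign bit is set. A rough sketch of the two behaviors in plain
C++ (names illustrative):

    #include <cstdint>

    // Returns false where the unflagged code would deoptimize.
    bool LoadUint32Element(uint32_t raw, bool has_uint32_flag, uint32_t* out) {
      if (!has_uint32_flag && (int32_t)raw < 0) return false;  // deopt path.
      *out = raw;  // with kUint32, the value stays unsigned throughout.
      return true;
    }
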
@@ -2513,6 +2705,96 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp<HLoadKeyed>(instr->hydrogen(), key);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ FAST_DOUBLE_ELEMENTS,
+ offset,
+ instr->additional_index());
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+ __ movsd(result, double_load_operand);
+}
+
+
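
The hole in a FixedDoubleArray is a reserved NaN bit pattern, and the cmpl
emitted above only needs to test its upper 32 bits (kHoleNanUpper32). A
host-side sketch of the same test; the actual constant lives in V8's
headers and is deliberately not reproduced here:

    #include <cstdint>
    #include <cstring>

    bool IsTheHole(double d, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));       // reinterpret the double.
      return (uint32_t)(bits >> 32) == hole_nan_upper32;
    }
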
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp<HLoadKeyed>(instr->hydrogen(), key);
+
+ // Load the result.
+ __ movq(result,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ Condition smi = __ CheckSmi(result);
+ DeoptimizeIf(NegateCondition(smi), instr->environment());
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* elements_pointer,
+ LOperand* key,
+ ElementsKind elements_kind,
+ uint32_t offset,
+ uint32_t additional_index) {
+ Register elements_pointer_reg = ToRegister(elements_pointer);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort("array index constant value too big");
+ }
+ return Operand(elements_pointer_reg,
+ ((constant_value + additional_index) << shift_size)
+ + offset);
+ } else {
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(elements_pointer_reg,
+ ToRegister(key),
+ scale_factor,
+ offset + (additional_index << shift_size));
+ }
+}
+
+
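
The effective-address arithmetic in BuildFastArrayOperand, restated as
plain C++ pointer math (shift_size is log2 of the element size). The
constant-key case folds everything into the displacement; the register-key
case is base + key*scale + displacement, but both reduce to the same sum:

    #include <cstdint>

    uint8_t* ElementAddress(uint8_t* elements, int64_t key, int shift_size,
                            uint32_t offset, uint32_t additional_index) {
      return elements + ((key + additional_index) << shift_size) + offset;
    }
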
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -2556,10 +2838,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Label done;
// If no arguments adaptor frame the number of arguments is fixed.
- if (instr->InputAt(0)->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->InputAt(0)));
+ if (instr->elements()->IsRegister()) {
+ __ cmpq(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->InputAt(0)));
+ __ cmpq(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
@@ -2616,7 +2898,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movq(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
@@ -2667,7 +2949,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
}
@@ -2679,7 +2961,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ LoadHeapObject(result, instr->hydrogen()->closure());
+ __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -2734,14 +3016,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(rdi, function);
}
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- }
+ // Change context.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
@@ -2783,7 +3059,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr->environment());
@@ -2835,7 +3111,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive);
@@ -2860,12 +3136,12 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ andpd(input_reg, scratch);
@@ -2873,8 +3149,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
@@ -2888,8 +3164,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- Label done;
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
@@ -2904,10 +3179,13 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmpl(output_reg, Immediate(0x80000000));
DeoptimizeIf(equal, instr->environment());
} else {
+ Label negative_sign, done;
// Deoptimize on negative inputs.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment());
+ __ j(below, &negative_sign, Label::kNear);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
Label positive_sign;
@@ -2922,19 +3200,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
-
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x80000000));
DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done, Label::kNear);
+
+ // Non-zero negative values reach here.
+ __ bind(&negative_sign);
+ // Truncate, then compare and compensate.
+ __ cvttsd2si(output_reg, input_reg);
+ __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(equal, &done, Label::kNear);
+ __ subl(output_reg, Immediate(1));
+ DeoptimizeIf(overflow, instr->environment());
+
+ __ bind(&done);
}
- __ bind(&done);
}
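
The new SSE2 fallback floors by truncating and compensating: cvttsd2si
rounds toward zero, so a negative non-integer input needs one more step
down. A scalar sketch (the generated code additionally deopts when the
conversion overflows to the minint sentinel, elided here):

    #include <cstdint>

    bool FloorToInt32(double x, int32_t* out) {
      if (x != x) return false;        // NaN: DeoptimizeIf(parity_even).
      int32_t t = (int32_t)x;          // cvttsd2si: truncate toward zero.
      if ((double)t != x && x < 0) {
        t -= 1;                        // compensate: -1.5 truncates to -1.
      }
      *out = t;
      return true;
    }
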
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
const XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label done;
// xmm_scratch = 0.5
@@ -2979,7 +3268,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@@ -2987,7 +3276,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// Note that according to ECMA-262 15.8.2.13:
@@ -3028,11 +3317,11 @@ void LCodeGen::DoPower(LPower* instr) {
#else
Register exponent = rdi;
#endif
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(exponent));
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(xmm1));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(exponent));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(xmm1));
+ ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsTagged()) {
@@ -3065,7 +3354,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
@@ -3074,10 +3363,10 @@ void LCodeGen::DoRandom(LRandom* instr) {
// Choose the right register for the first argument depending on
// calling convention.
#ifdef _WIN64
- ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
+ ASSERT(ToRegister(instr->global_object()).is(rcx));
Register global_object = rcx;
#else
- ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->global_object()).is(rdi));
Register global_object = rdi;
#endif
@@ -3085,11 +3374,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
__ movq(global_object,
- FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
+ FieldOperand(global_object, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the global context's random seeds
+ // rbx: FixedArray of the native context's random seeds
// Load state[0].
__ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
@@ -3292,7 +3581,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3312,7 +3601,22 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
int offset = instr->offset();
if (!instr->transition().is_null()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ __ Move(FieldOperand(object, HeapObject::kMapOffset),
+ instr->transition());
+ } else {
+ Register temp = ToRegister(instr->temp());
+ __ Move(kScratchRegister, instr->transition());
+ __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ kScratchRegister,
+ temp,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
}
// Do the store.
@@ -3322,7 +3626,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
@@ -3333,7 +3637,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
check_needed);
}
} else {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -3363,21 +3667,76 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- Operand operand(BuildFastArrayOperand(instr->external_pointer(),
- instr->key(),
- elements_kind,
- 0,
- instr->additional_index()));
+void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand) {
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
+ Condition cc;
+ if (operand->IsRegister()) {
+ cc = masm()->CheckSmi(ToRegister(operand));
+ } else {
+ cc = masm()->CheckSmi(ToOperand(operand));
+ }
+ DeoptimizeIf(NegateCondition(cc), environment);
+ }
+}
+
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->length(),
+ instr->length());
+ DeoptIfTaggedButNotSmi(instr->environment(),
+ instr->hydrogen()->index(),
+ instr->index());
+ if (instr->length()->IsRegister()) {
+ Register reg = ToRegister(instr->length());
+ if (!instr->hydrogen()->length()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg);
+ }
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ Cmp(reg, Smi::FromInt(constant_index));
+ } else {
+ __ cmpq(reg, Immediate(constant_index));
+ }
+ } else {
+ Register reg2 = ToRegister(instr->index());
+ if (!instr->hydrogen()->index()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg2);
+ }
+ __ cmpq(reg, reg2);
+ }
+ } else {
+ Operand length = ToOperand(instr->length());
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
+ __ Cmp(length, Smi::FromInt(constant_index));
+ } else {
+ __ cmpq(length, Immediate(constant_index));
+ }
+ } else {
+ __ cmpq(length, ToRegister(instr->index()));
+ }
}
+ DeoptimizeIf(below_equal, instr->environment());
+}
+
+
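
DoBoundsCheck compares length against index and deopts on below_equal, an
unsigned condition, so a negative index (once zero-extended) also fails the
check. The predicate, as a one-line sketch:

    #include <cstdint>

    bool IndexInBounds(uint64_t length, uint64_t index) {
      return length > index;  // deopt when length <= index (below_equal).
    }
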
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
@@ -3404,8 +3763,11 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3415,106 +3777,79 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
- if (FLAG_debug_code) {
- __ AbortIfNotZeroExtended(reg);
- }
- if (instr->index()->IsConstantOperand()) {
- __ cmpq(reg,
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
- } else {
- Register reg2 = ToRegister(instr->index());
- if (FLAG_debug_code) {
- __ AbortIfNotZeroExtended(reg2);
- }
- __ cmpq(reg, reg2);
- }
- } else {
- if (instr->index()->IsConstantOperand()) {
- __ cmpq(ToOperand(instr->length()),
- Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
- } else {
- __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
- }
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // Not NaN: no canonicalization needed.
+
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(value, kScratchRegister);
+
+ __ bind(&have_value);
}
- DeoptimizeIf(below_equal, instr->environment());
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+
+ __ movsd(double_store_operand, value);
}
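
NeedsCanonicalization exists because an arbitrary NaN stored into a
FixedDoubleArray could collide with the hole's bit pattern, so every NaN is
rewritten to one canonical NaN first. A sketch, with the canonical bits
passed in rather than copied from V8's headers:

    #include <cstdint>
    #include <cstring>

    double CanonicalizeForStore(double v, uint64_t canonical_nan_bits) {
      if (v == v) return v;  // ordered compare: not NaN, store unchanged.
      double canonical;
      std::memcpy(&canonical, &canonical_nan_bits, sizeof(canonical));
      return canonical;      // every NaN becomes the same bit pattern.
    }
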
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register elements = ToRegister(instr->elements());
+ LOperand* key = instr->key();
+ PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
Operand operand =
- BuildFastArrayOperand(instr->object(),
- instr->key(),
+ BuildFastArrayOperand(instr->elements(),
+ key,
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
- }
-
- __ movq(operand, value);
-
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ lea(key, operand);
+ Register key_reg(ToRegister(key));
+ __ lea(key_reg, operand);
+ __ movq(Operand(key_reg, 0), value);
__ RecordWrite(elements,
- key,
+ key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
+ } else {
+ __ movq(operand, value);
}
}
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(value, kScratchRegister);
-
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- Register key_reg = ToRegister(instr->key());
- __ movsxlq(key_reg, key_reg);
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
}
-
- __ movsd(double_store_operand, value);
}
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
@@ -3529,7 +3864,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
@@ -3540,22 +3875,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
- ASSERT_NE(instr->temp_reg(), NULL);
+ ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp_reg()), kDontSaveFPRegs);
- } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
- to_kind == FAST_DOUBLE_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ ToRegister(instr->temp()), kDontSaveFPRegs);
+ } else if (IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ } else if (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) {
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
__ movq(fixed_object_reg, object_reg);
@@ -3588,7 +3924,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@@ -3622,9 +3958,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(rax);
- }
+ __ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -3642,7 +3976,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -3686,7 +4020,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -3698,8 +4032,19 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ LOperand* temp = instr->temp();
+
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+}
+
+
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -3707,6 +4052,69 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ cmpl(reg, Immediate(Smi::kMaxValue));
+ __ j(above, deferred->entry());
+ __ Integer32ToSmi(reg, reg);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
+ Label slow;
+ Register reg = ToRegister(instr->value());
+ Register tmp = reg.is(rax) ? rcx : rax;
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ Label done;
+ // Load the value into xmm1, which will be preserved across a potential
+ // call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves
+ // only allocatable XMM registers on x64).
+ __ LoadUint32(xmm1, reg, xmm0);
+
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, &slow);
+ __ jmp(&done, Label::kNear);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ // Put a valid pointer value in the stack slot where the result register
+ // is stored: this register is in the pointer map but currently holds an
+ // integer value.
+ __ StoreToSafepointRegisterSlot(reg, Immediate(0));
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ if (!reg.is(rax)) __ movq(reg, rax);
+
+ // Done. Store the double in xmm1 into the value field of the allocated
+ // heap number.
+ __ bind(&done);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
+ __ StoreToSafepointRegisterSlot(reg, reg);
+}
+
+
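
DoNumberTagU splits on Smi::kMaxValue: a uint32 that fits is tagged in
place, anything larger takes the deferred path and is boxed as a heap
number. A host-side sketch of the split, assuming the 2^31 - 1 Smi limit
x64 had at this point:

    #include <cstdint>

    struct TaggedNumber { bool is_smi; int64_t smi; double heap_number; };

    TaggedNumber NumberTagU(uint32_t v) {
      const uint32_t kSmiMaxValue = 0x7FFFFFFFu;  // assumed Smi::kMaxValue
      if (v <= kSmiMaxValue) {
        return TaggedNumber{true, (int64_t)v, 0.0};  // Integer32ToSmi path.
      }
      return TaggedNumber{false, 0, (double)v};  // DoDeferredNumberTagU path.
    }
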
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD: public LDeferredCode {
public:
@@ -3718,11 +4126,11 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->TempAt(0));
+ Register tmp = ToRegister(instr->temp());
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
} else {
@@ -3751,19 +4159,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ Register input = ToRegister(instr->value());
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ Integer32ToSmi(input, input);
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ } else {
+ __ AssertSmi(input);
}
__ SmiToInteger32(input, input);
}
@@ -3821,7 +4231,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3847,7 +4257,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(not_equal, instr->environment());
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ cvtlsi2sd(xmm_temp, input_reg);
@@ -3877,12 +4287,12 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
__ bind(deferred->exit());
@@ -3890,7 +4300,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -3906,7 +4316,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
@@ -3946,21 +4356,21 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(NegateCondition(cc), instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
@@ -4032,7 +4442,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -4052,8 +4462,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- Register temp_reg = ToRegister(instr->TempAt(0));
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}
@@ -4067,8 +4476,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
- Register temp_reg = ToRegister(instr->TempAt(0));
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4088,7 +4496,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+ __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -4101,7 +4509,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->TempAt(0));
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4136,10 +4545,11 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+ DeferredAllocateObject* deferred =
+ new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4172,7 +4582,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
- __ AbortIfSmi(map);
+ __ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
Immediate(instance_size >> kPointerSizeLog2));
__ Assert(equal, "Unexpected instance size");
@@ -4222,14 +4632,15 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Heap* heap = isolate()->heap();
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Load the map's "bit field 2".
@@ -4242,12 +4653,11 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
// Set up the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
// Boilerplate already exists; constant elements are never accessed.
// Pass an empty fixed array.
- __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
+ __ Push(isolate()->factory()->empty_fixed_array());
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4375,10 +4785,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
+ if (CanTransitionToMoreGeneralFastElementsKind(
+ boilerplate_elements_kind, true)) {
__ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
__ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
// Load the map's "bit field 2".
@@ -4440,7 +4851,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(ToRegister(instr->value()).is(rax));
__ push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -4449,14 +4860,12 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // rdi = JS function.
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset = FixedArray::kHeaderSize +
- instr->hydrogen()->literal_index() * kPointerSize;
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadHeapObject(rcx, instr->hydrogen()->literals());
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -4519,7 +4928,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4543,7 +4952,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -4629,7 +5038,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -4756,7 +5165,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr);
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
@@ -4825,11 +5234,19 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ Cmp(result, Smi::FromInt(0));
+ __ j(not_equal, &load_cache);
+ __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
+ __ jmp(&done);
+ __ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ movq(result,
- FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ bind(&done);
Condition cc = masm()->CheckSmi(result);
DeoptimizeIf(cc, instr->environment());
}
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
index 73e1a9b..0f8a62a 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
@@ -46,21 +46,24 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : chunk_(chunk),
+ : zone_(info->zone()),
+ chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
- deoptimizations_(4),
- jump_table_(4),
- deoptimization_literals_(8),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- deferred_(8),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -72,6 +75,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
@@ -94,6 +98,7 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredNumberTagU(LNumberTagU* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -112,7 +117,10 @@ class LCodeGen BASE_EMBEDDED {
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void WriteTranslation(LEnvironment* environment,
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
@@ -136,7 +144,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -152,10 +160,10 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
void Comment(const char* format, ...);
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@@ -219,7 +227,10 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
- bool is_tagged);
+ bool is_tagged,
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -267,6 +278,11 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
+
+ void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+ HValue* value,
+ LOperand* operand);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -296,7 +312,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
- Handle<String> name);
+ Handle<String> name,
+ LEnvironment* env);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -318,8 +335,17 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt(int space_needed);
-
- LChunk* const chunk_;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+ template <class T>
+ void PrepareKeyForKeyedOp(T* hydrogen_instr, LOperand* key);
+
+ Zone* zone_;
+ LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
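
The constructor changes above are all one refactor: LCodeGen now carries the compilation's Zone and threads it into every growable list and every placement-new of deferred code, so the whole chunk's bookkeeping is freed at once with the zone instead of piecemeal on the C++ heap. A standalone sketch of the pattern, assuming a simplified bump allocator rather than the real v8::internal::Zone:

    #include <cstddef>
    #include <new>

    class Zone {
     public:
      explicit Zone(size_t capacity) : buf_(new char[capacity]), pos_(0) {}
      ~Zone() { delete[] buf_; }        // every allocation dies here, at once
      void* New(size_t size) {          // no per-object free; sketch omits bounds checks
        void* p = buf_ + pos_;
        pos_ += (size + 7) & ~size_t(7);
        return p;
      }
     private:
      char* buf_;
      size_t pos_;
    };

    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }

    struct DeferredStackCheckSketch { int id; };

    int main() {
      Zone zone(1 << 16);
      // Mirrors "new(zone()) DeferredStackCheck(this, instr)" in the hunks above.
      DeferredStackCheckSketch* d = new (&zone) DeferredStackCheckSketch{42};
      return d->id == 42 ? 0 : 1;
    }
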
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
index 877ea8c..22183a2 100644
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -36,7 +36,7 @@ namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32) {}
+ : cgen_(owner), moves_(32, owner->zone()) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
@@ -74,7 +74,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
index 6d723a5..3985dc0 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-x64.cc
@@ -179,6 +179,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -196,22 +197,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -220,57 +221,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -280,7 +281,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -294,26 +295,26 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -342,7 +343,7 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -358,56 +359,20 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(bool is_double) {
return spill_slot_count_++;
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
int index = GetNextSpillIndex(is_double);
if (is_double) {
- return LDoubleStackSlot::Create(index);
+ return LDoubleStackSlot::Create(index, zone());
} else {
- return LStackSlot::Create(index);
- }
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- HPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
-
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
+ return LStackSlot::Create(index, zone());
}
}
@@ -430,16 +395,7 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -463,83 +419,9 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap);
- index = instructions_.length();
- instructions_.Add(instr);
- } else {
- index = instructions_.length();
- instructions_.Add(instr);
- instructions_.Add(gap);
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
- ASSERT(result < 0);
- return result;
-}
-
-// A parameter relative to ebp in the arguments stub.
-int LChunk::ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
+LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new(zone()) LChunk(info(), graph());
+ chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -554,17 +436,8 @@ LChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* format, ...) {
- if (FLAG_trace_bailout) {
- SmartArrayPointer<char> name(
- info()->shared_info()->DebugName()->ToCString());
- PrintF("Aborting LChunk building in @\"%s\": ", *name);
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- PrintF("\n");
- }
+void LCodeGen::Abort(const char* reason) {
+ info()->set_bailout_reason(reason);
status_ = ABORTED;
}
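
LChunkBuilder::Abort's printf-style tracing is gone; the replacement shown above records a static reason string on the CompilationInfo and flips status, leaving reporting to the caller. A sketch of the new contract (Info here is a stand-in, not the real CompilationInfo):

    struct Info {
      const char* bailout_reason;
    };

    enum Status { UNUSED, BUILDING, ABORTED };

    void Abort(Info* info, Status* status, const char* reason) {
      info->bailout_reason = reason;  // recorded, not printed, in the new scheme
      *status = ABORTED;
    }
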
@@ -735,7 +608,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@@ -757,7 +630,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@@ -830,13 +703,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
- if (may_deopt) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
+ break;
+ }
}
}
}
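
The rewritten condition above folds the old may_deopt flag into a single branch and adds the uint32 fast path: under --opt-safe-uint32-operations, a result marked kUint32 skips the per-use scan entirely. The underlying problem is unchanged and easy to see standalone: a logical shift by zero is the only shift whose result can exceed int32 range.

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t value = -1;
      uint32_t shr0 = static_cast<uint32_t>(value) >> 0;   // JS: value >>> 0
      assert(shr0 == 4294967295u);                         // does not fit in int32
      // A shift by any nonzero amount always fits:
      assert((static_cast<uint32_t>(value) >> 1) <= uint32_t(INT32_MAX));
    }
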
@@ -969,8 +845,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber ||
+ BailoutId ast_id = hydrogen_env->ast_id();
+ ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@@ -980,7 +856,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->parameter_count(),
argument_count_,
value_count,
- outer);
+ outer,
+ hydrogen_env->entry(),
+ zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -994,7 +872,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
- result->AddValue(op, value->representation());
+ result->AddValue(op,
+ value->representation(),
+ value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@@ -1221,6 +1101,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
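
DoRor is new plumbing only: HRor rides the existing DoShift machinery under Token::ROR (the "ror-t" mnemonic added in the earlier hunk). Rotate-right itself, for reference, as a standalone uint32 helper:

    #include <cstdint>

    uint32_t RotateRight(uint32_t x, unsigned n) {
      n &= 31;                                   // x64's rorl masks the count the same way
      return (x >> n) | (x << ((32 - n) & 31));  // second mask avoids UB when n == 0
    }
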
@@ -1285,12 +1170,55 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+ // A value with an integer representation does not need to be transformed.
+ if (dividend->representation().IsInteger32()) {
+ return dividend;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (dividend->IsChange() &&
+ HChange::cast(dividend)->from().IsInteger32()) {
+ return HChange::cast(dividend)->value();
+ }
+ return NULL;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+ if (divisor->IsConstant() &&
+ HConstant::cast(divisor)->HasInteger32Value()) {
+ HConstant* constant_val = HConstant::cast(divisor);
+ return constant_val->CopyToRepresentation(Representation::Integer32(),
+ divisor->block()->zone());
+ }
return NULL;
}
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ HValue* right = instr->right();
+ ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
+ LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
+ int32_t divisor_si = HConstant::cast(right)->Integer32Value();
+ if (divisor_si == 0) {
+ LOperand* dividend = UseRegister(instr->left());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
+ } else if (IsPowerOf2(abs(divisor_si))) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ } else {
+    // Generic constant divisor: the multiply-and-shift lowering needs a temp register.
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
+ return divisor_si < 0 ? AssignEnvironment(result) : result;
+ }
+}
+
+
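
DoMathFloorOfDiv splits on the constant divisor: zero deopts, a power of two becomes a shift, and anything else takes the temp-register multiply path. The power-of-two case is the interesting one: an arithmetic right shift is exactly floor division, even for negative dividends, which is why that branch needs no fixup code (negative divisors still get an environment because the sign flip can overflow at INT_MIN). Checked standalone, assuming the arithmetic shift an x64 target provides:

    #include <cassert>

    int FloorDivByPow2(int dividend, int shift) {
      return dividend >> shift;    // arithmetic shift floors toward -infinity
    }

    int main() {
      assert(FloorDivByPow2(7, 1) == 3);     // floor(7 / 2)
      assert(FloorDivByPow2(-7, 1) == -4);   // floor(-7 / 2), not truncation's -3
    }
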
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1396,6 +1324,26 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
+ right = UseOrConstantAtStart(instr->MostConstantOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+ return DefineSameAsFirst(minmax);
+}
+
+
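
DoMathMinMax pins the result to the left operand (DefineSameAsFirst) because x64's two-operand cmov sequence overwrites its first input; LeastConstantOperand and MostConstantOperand merely steer any constant into the cheap use-or-constant slot. The integer semantics being compiled, for reference:

    #include <cassert>

    int MathMin(int left, int right) { return right < left ? right : left; }

    int main() {
      assert(MathMin(3, -2) == -2);
      assert(MathMin(-2, 3) == -2);  // symmetric, so operand order is free
    }
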
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@@ -1578,6 +1526,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@@ -1593,8 +1547,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr);
+ LDateField* result = new(zone()) LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1642,14 +1596,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- bool needs_check = !instr->value()->type().IsSmi();
- if (needs_check) {
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+ } else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
@@ -1670,16 +1623,26 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ } else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp = FixedTemp(xmm1);
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ } else {
+ ASSERT(to.IsDouble());
+ LOperand* value = Use(instr->value());
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+ }
}
}
UNREACHABLE();
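
Both new branches above exist because a kUint32 value must be reinterpreted, not sign-extended: on x64 the smi payload is a full int32, so any uint32 above INT32_MAX needs LNumberTagU's deferred heap-number path, and LUint32ToDouble converts through a temp instead of the plain int32 cvtsi2sd. A standalone illustration of the boundary (the smi-width claim is the x64 assumption stated here, not shown in the hunk):

    #include <cassert>
    #include <cstdint>

    // Sketch: assumes x64-style smis whose payload is a full int32.
    bool Uint32FitsInSmi(uint32_t v) {
      return v <= static_cast<uint32_t>(INT32_MAX);
    }

    int main() {
      assert(Uint32FitsInSmi(0x7fffffffu));    // still a smi
      assert(!Uint32FitsInSmi(0x80000000u));   // LNumberTagU must box this one
      double as_double = static_cast<double>(0x80000000u);
      assert(as_double == 2147483648.0);       // the value, not an int32 reinterpretation
    }
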
@@ -1701,9 +1664,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
+ LUnallocated* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
+ return AssignEnvironment(Define(result, temp));
}
@@ -1731,8 +1694,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg,
- TempRegister()));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
@@ -1740,7 +1702,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- TempRegister(),
FixedTemp(xmm1));
return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1879,50 +1840,35 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ ElementsKind elements_kind = instr->elements_kind();
+ bool clobbers_key = ArrayOpClobbersKey<HLoadKeyed>(instr);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = new(zone()) LLoadKeyed(elements, key);
+#ifdef DEBUG
+ if (instr->is_external()) {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ }
+#endif
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
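
DoLoadKeyedFastElement, DoLoadKeyedFastDoubleElement and DoLoadKeyedSpecializedArrayElement collapse into a single DoLoadKeyed; the per-kind asserts move behind #ifdef DEBUG, and codegen dispatches on the elements kind through the DoLoadKeyedExternalArray / FixedDoubleArray / FixedArray helpers declared in the header hunk earlier. A simplified model of that dispatch, with illustrative names:

    enum ElementsKindSketch { FAST, FAST_DOUBLE, EXTERNAL };

    const char* SelectLoadBody(ElementsKindSketch kind) {
      switch (kind) {
        case EXTERNAL:    return "DoLoadKeyedExternalArray";
        case FAST_DOUBLE: return "DoLoadKeyedFixedDoubleArray";
        default:          return "DoLoadKeyedFixedArray";
      }
    }
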
@@ -1935,63 +1881,36 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* obj = UseTempRegister(instr->object());
+ bool clobbers_key = ArrayOpClobbersKey<HStoreKeyed>(instr);
+ LOperand* key = (clobbers_key || needs_write_barrier)
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
-
LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstant(instr->key());
+#ifdef DEBUG
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ } else {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ }
+#endif
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
+ ASSERT(result != NULL);
+ return result;
}
@@ -2012,8 +1931,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+ ElementsKind from_kind = instr->original_map()->elements_kind();
+ ElementsKind to_kind = instr->transitioned_map()->elements_kind();
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
@@ -2035,10 +1955,19 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = needs_write_barrier
- ? UseTempRegister(instr->object())
- : UseRegisterAtStart(instr->object());
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = instr->is_in_object()
+ ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map
+ ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
@@ -2046,8 +1975,8 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
- ? TempRegister() : NULL;
+ LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2092,7 +2021,7 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LAllocateObject* result = new LAllocateObject(TempRegister());
+ LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
@@ -2131,6 +2060,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2169,12 +2099,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2228,7 +2156,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
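
The AstNode::kNoNumber to BailoutId::None() change here, like the IsNone() assert earlier, swaps a raw int sentinel for a small value type, so an AST id can no longer be confused with an ordinary integer. The shape of such a wrapper, roughly:

    class BailoutId {
     public:
      explicit BailoutId(int id) : id_(id) {}
      static BailoutId None() { return BailoutId(-1); }
      bool IsNone() const { return id_ == -1; }
      int ToInt() const { return id_; }
     private:
      int id_;
    };
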
@@ -2254,10 +2182,11 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
- instr->is_construct());
+ instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2269,7 +2198,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* env = current_block_->last_environment();
- if (instr->arguments_pushed()) {
+ if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
index ac1a5db..a437a2b 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-x64.h
@@ -96,6 +96,7 @@ class LCodeGen;
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
+ V(MapEnumLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -108,6 +109,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
+ V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -115,7 +117,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
- V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -125,17 +126,18 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathFloorOfDiv) \
+ V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
+ V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@@ -153,15 +155,14 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -257,11 +258,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -270,6 +266,15 @@ class LInstruction: public ZoneObject {
#endif
private:
+ // Iterator support.
+ friend class InputIterator;
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -289,16 +294,18 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- int InputCount() { return I; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
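
This header reshuffle makes positional access (InputCount/InputAt, TempCount/TempAt) private, reachable only through the friend iterator classes, which is what forces every call site in the .cc hunks above to switch from InputAt(0) to a named accessor like value() or left(). The pattern in miniature:

    class InstrSketch {
     public:
      int* value() { return inputs_[0]; }   // named accessor: the public surface

     private:
      friend class InputIteratorSketch;     // iterators may still walk positionally
      int InputCount() { return 1; }
      int* InputAt(int i) { return inputs_[i]; }
      int* inputs_[1];
    };
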
@@ -333,8 +340,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
+ Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
return parallel_moves_[pos];
}
@@ -462,10 +472,10 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
};
@@ -481,12 +491,12 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
inputs_[3] = elements;
}
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* length() { return inputs_[2]; }
LOperand* elements() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
};
@@ -498,12 +508,12 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
LOperand* arguments() { return inputs_[0]; }
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
virtual void PrintDataTo(StringStream* stream);
};
@@ -514,6 +524,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -533,6 +545,10 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -546,11 +562,34 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathFloorOfDiv(LOperand* left,
+ LOperand* right,
+ LOperand* temp = NULL) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
class LMulI: public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
@@ -558,6 +597,9 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -570,6 +612,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
@@ -588,6 +633,8 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -603,6 +650,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
};
@@ -614,6 +664,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -627,6 +679,9 @@ class LIsNilAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -643,6 +698,8 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
@@ -657,6 +714,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
@@ -670,6 +730,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -684,6 +746,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
@@ -699,6 +764,9 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -715,6 +783,8 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -729,6 +799,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -740,6 +812,8 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
@@ -756,6 +830,10 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -771,6 +849,9 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -799,6 +880,9 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -810,6 +894,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -838,6 +925,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@@ -848,6 +936,9 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -864,7 +955,8 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -882,6 +974,9 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -901,6 +996,9 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
}
+
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -923,6 +1021,8 @@ class LBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -936,6 +1036,8 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -957,6 +1059,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -968,18 +1072,34 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
+class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -991,6 +1111,8 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1002,11 +1124,12 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = date;
}
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
- Smi* index() const { return index_; }
-
private:
Smi* index_;
};
@@ -1018,6 +1141,8 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1028,6 +1153,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1039,11 +1166,29 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
+class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@@ -1051,6 +1196,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1062,6 +1210,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1076,6 +1226,8 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
@@ -1094,12 +1246,14 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
- Token::Value op() const { return op_; }
-
private:
Token::Value op_;
};
@@ -1111,6 +1265,8 @@ class LReturn: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1121,6 +1277,8 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
@@ -1172,6 +1330,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1182,64 +1342,43 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
+template <class T>
+inline static bool ArrayOpClobbersKey(T *value) {
+ CHECK(value->IsLoadKeyed() || value->IsStoreKeyed());
+ return !value->IsConstant() && (value->key()->representation().IsTagged()
+ || value->IsDehoisted());
+}
+
+
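
ArrayOpClobbersKey is why DoLoadKeyed and DoStoreKeyed above request UseTempRegister for some keys: a tagged key is untagged in place, and on x64 untagging is a destructive shift because the smi payload lives in the upper half of the word (dehoisted keys similarly get the base offset folded into the register). The in-place untag, modeled standalone under that x64 assumption:

    #include <cassert>
    #include <cstdint>

    // Sketch of x64 smi untagging (shift by 32): the shift overwrites the
    // register holding the key, so the allocator must hand out a temp.
    int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    int main() {
      int64_t smi = int64_t(7) << 32;     // the smi encoding of 7
      assert(SmiToInteger32(smi) == 7);
    }
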
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
@@ -1283,10 +1422,11 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1298,12 +1438,13 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* global_object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1314,10 +1455,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1332,11 +1474,13 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1349,6 +1493,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1385,9 +1531,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1416,9 +1562,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1440,11 +1586,11 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1531,6 +1677,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = constructor;
}
+ LOperand* constructor() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1556,20 +1704,52 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
+class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
+class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
@@ -1577,6 +1757,9 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1588,6 +1771,8 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1603,6 +1788,9 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1616,6 +1804,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1626,6 +1816,8 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
@@ -1638,10 +1830,11 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
@@ -1655,14 +1848,15 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1677,88 +1871,42 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
+ bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
@@ -1767,14 +1915,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1783,21 +1932,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1810,11 +1960,11 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1825,11 +1975,11 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1839,10 +1989,10 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = char_code;
}
+ LOperand* char_code() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
@@ -1852,10 +2002,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1865,7 +2015,7 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
@@ -1878,6 +2028,8 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1889,17 +2041,21 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -1914,15 +2070,16 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1933,8 +2090,8 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1943,17 +2100,16 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value,
- LOperand* temp,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
+  LClampTToUint8(LOperand* unclamped, LOperand* temp_xmm) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -1965,6 +2121,8 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -1975,6 +2133,8 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2023,6 +2183,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2034,6 +2196,8 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2044,6 +2208,8 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2059,6 +2225,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
@@ -2072,10 +2240,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
@@ -2171,69 +2339,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LChunk: public ZoneObject {
+class LPlatformChunk: public LChunk {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32),
- pointer_maps_(8),
- inlined_closures_(1) { }
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- Handle<Object> LookupLiteral(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
- }
- int LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
- }
- Label* GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- ASSERT(!label->HasReplacement());
- return label->label();
- }
-
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- void AddInlinedClosure(Handle<JSFunction> closure) {
- inlined_closures_.Add(closure);
- }
-
- private:
- int spill_slot_count_;
- CompilationInfo* info_;
- HGraph* const graph_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2243,7 +2355,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->isolate()->zone()),
+ zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2252,16 +2364,19 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+ pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
- LChunk* Build();
+ LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+ static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
private:
enum Status {
UNUSED,
@@ -2270,7 +2385,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
- LChunk* chunk() const { return chunk_; }
+ LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@@ -2280,7 +2395,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* format, ...);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2374,7 +2489,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
- LChunk* chunk_;
+ LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@@ -2386,7 +2501,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
- int pending_deoptimization_ast_id_;
+ BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
index 3d380a2..962c2e8 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
@@ -53,9 +53,17 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+static const int kInvalidRootRegisterDelta = -1;
+
+
+intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+ if (predictable_code_size() &&
+ (other.address() < reinterpret_cast<Address>(isolate()) ||
+ other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
+ return kInvalidRootRegisterDelta;
+ }
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_array_start());
+ reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
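
// For reference, a standalone sketch (not part of the patch) of the delta
// math: an external address is reachable as Operand(kRootRegister, delta)
// only when delta fits a signed 32-bit displacement. The addresses below are
// hypothetical; kRootRegisterBias of 128 is the value this file assumes.
//
//   #include <cstdint>
//   #include <cstdio>
//
//   int main() {
//     const long long kRootRegisterBias = 128;
//     long long roots_start = 0x7f0000100000LL;  // hypothetical heap address
//     long long root_reg = roots_start + kRootRegisterBias;
//     long long target = 0x7f0000180000LL;       // hypothetical external ref
//     long long delta = target - root_reg;
//     bool usable = delta == (int32_t)delta;     // the is_int32(delta) test
//     printf("delta=%lld usable=%d\n", delta, usable);  // 524160, 1
//     return 0;
//   }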
@@ -64,8 +72,8 @@ static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(target);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
@@ -77,8 +85,8 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
@@ -96,8 +104,8 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(destination);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
@@ -116,8 +124,8 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
@@ -133,8 +141,8 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
// Operand is lea(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
@@ -216,7 +224,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
@@ -388,16 +396,14 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmpq(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -538,7 +544,7 @@ void MacroAssembler::Abort(const char* msg) {
}
-void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -743,17 +749,52 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = rax;
+ Register map = rcx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kTrueValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kFalseValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kNullValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ Abort("API call returned invalid object");
+
+ bind(&ok);
+#endif
+
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
bind(&empty_result);
// It was zero; the result is undefined.
- Move(rax, factory->undefined_value());
+ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
jmp(&prologue);
+ bind(&promote_scheduled_exception);
+ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
@@ -798,7 +839,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
movq(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
@@ -892,6 +933,38 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
}
+
+bool MacroAssembler::IsUnsafeInt(const int x) {
+ static const int kMaxBits = 17;
+ return !is_intn(x, kMaxBits);
+}
+
+
+void MacroAssembler::SafeMove(Register dst, Smi* src) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xor_(dst, kScratchRegister);
+ } else {
+ Move(dst, src);
+ }
+}
+
+
+void MacroAssembler::SafePush(Smi* src) {
+ ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xor_(Operand(rsp, 0), kScratchRegister);
+ } else {
+ Push(src);
+ }
+}
+
+
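+// A standalone illustration (not part of the patch) of the constant
+// splitting above: the code stream carries v ^ cookie, and the cookie is
+// XORed back in at run time, so an attacker-chosen constant v never appears
+// verbatim in executable memory. The cookie value here is hypothetical.
+//
+//   #include <cstdint>
+//   #include <cstdio>
+//
+//   int main() {
+//     const int32_t jit_cookie = 0x12345678;  // hypothetical per-VM value
+//     int32_t v = 0x0BADC0DE;                 // "unsafe" constant to hide
+//     int32_t emitted = v ^ jit_cookie;       // baked into generated code
+//     int32_t at_runtime = emitted ^ jit_cookie;  // reconstructed value
+//     printf("%d\n", at_runtime == v);        // prints 1
+//     return 0;
+//   }
+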
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -1040,18 +1113,14 @@ void MacroAssembler::SmiTest(Register src) {
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- if (emit_debug_code()) {
- AbortIfNotSmi(smi1);
- AbortIfNotSmi(smi2);
- }
+ AssertSmi(smi1);
+ AssertSmi(smi2);
cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
Cmp(dst, src);
}
@@ -1068,27 +1137,21 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
@@ -2165,7 +2228,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
j(not_equal, failure, near_jump);
}
@@ -2377,7 +2440,7 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
@@ -2460,6 +2523,12 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
};
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
+ const Immediate& imm) {
+ movq(SafepointRegisterSlot(dst), imm);
+}
+
+
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
movq(SafepointRegisterSlot(dst), src);
}
@@ -2658,10 +2727,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
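
// A standalone model (not part of the patch) of the new range check: with
// the reordered kinds asserted above (SMI=0, HOLEY_SMI=1, FAST=2, HOLEY=3),
// "has fast elements" collapses to one unsigned comparison against the
// largest holey value.
//
//   #include <cstdio>
//
//   int main() {
//     const unsigned kMaxFastHoley = 3;  // FAST_HOLEY_ELEMENTS per asserts
//     for (unsigned kind = 0; kind <= 4; ++kind)
//       printf("kind=%u fast=%d\n", kind, kind <= kMaxFastHoley);  // 4: not fast
//     return 0;
//   }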
@@ -2669,23 +2740,26 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(above, fail, distance);
}
@@ -2749,24 +2823,18 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_fast_element_map));
- }
-
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_double_map));
+ ElementsKind kind = map->elements_kind();
+ if (IsFastElementsKind(kind)) {
+ bool packed = IsFastPackedElementsKind(kind);
+ Map* current_map = *map;
+ while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
+ kind = GetNextMoreGeneralFastElementsKind(kind, packed);
+ current_map = current_map->LookupElementsTransitionMap(kind);
+ if (!current_map) break;
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(current_map));
+ }
}
}
}
@@ -2800,33 +2868,66 @@ void MacroAssembler::ClampUint8(Register reg) {
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg) {
+ Register result_reg) {
Label done;
- Set(result_reg, 0);
+ Label conv_failure;
xorps(temp_xmm_reg, temp_xmm_reg);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
- uint64_t one_half = BitCast<uint64_t, double>(0.5);
- Set(temp_reg, one_half);
- movq(temp_xmm_reg, temp_reg);
- addsd(temp_xmm_reg, input_reg);
- cvttsd2si(result_reg, temp_xmm_reg);
+ cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
+ cmpl(result_reg, Immediate(0x80000000));
+ j(equal, &conv_failure, Label::kNear);
+ movl(result_reg, Immediate(0));
+ setcc(above, result_reg);
+ subl(result_reg, Immediate(1));
+ andl(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ Set(result_reg, 0);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
}
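
// A standalone model (not part of the patch) of the clamp semantics: the
// value is rounded to nearest (cvtsd2si behavior) and saturated into
// [0, 255]; NaN clamps to 0. std::lrint stands in for cvtsd2si here.
//
//   #include <cmath>
//   #include <cstdio>
//
//   static int ClampDoubleToUint8(double d) {
//     if (!(d > 0)) return 0;              // NaN and non-positive clamp to 0
//     if (d >= 255) return 255;            // saturate the high side
//     return (int)std::lrint(d);           // round-to-nearest, like cvtsd2si
//   }
//
//   int main() {
//     printf("%d %d %d %d\n",
//            ClampDoubleToUint8(-3.5),     // 0
//            ClampDoubleToUint8(1.4),      // 1
//            ClampDoubleToUint8(254.6),    // 255
//            ClampDoubleToUint8(1e12));    // 255
//     return 0;
//   }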
+static double kUint32Bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+
+
+void MacroAssembler::LoadUint32(XMMRegister dst,
+ Register src,
+ XMMRegister scratch) {
+ Label done;
+ cmpl(src, Immediate(0));
+ movq(kScratchRegister,
+ reinterpret_cast<int64_t>(&kUint32Bias),
+ RelocInfo::NONE);
+ movsd(scratch, Operand(kScratchRegister, 0));
+ cvtlsi2sd(dst, src);
+ j(not_sign, &done, Label::kNear);
+ addsd(dst, scratch);
+ bind(&done);
+}
+
+
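+// A standalone sketch (not part of the patch) of the bias trick: cvtlsi2sd
+// converts the 32-bit value as *signed*, so when the sign bit is set the
+// result is low by exactly 2^32, which adding kUint32Bias corrects.
+//
+//   #include <cstdint>
+//   #include <cstdio>
+//
+//   int main() {
+//     const double kUint32Bias = 4294967296.0;  // 2^32, as defined above
+//     uint32_t u = 0xFFFFFFF0u;                 // sign bit set as int32
+//     double as_signed = static_cast<double>(static_cast<int32_t>(u));  // -16
+//     double result = as_signed < 0 ? as_signed + kUint32Bias : as_signed;
+//     printf("%.1f\n", result);                 // 4294967280.0
+//     return 0;
+//   }
+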
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map,
- Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
- Move(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&not_smi);
+ movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
+ and_(dst, kScratchRegister);
}
@@ -2845,61 +2946,75 @@ void MacroAssembler::DispatchMap(Register obj,
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok, Label::kNear);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, "Operand is not a number");
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(NegateCondition(is_smi), "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), "Operand is a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(const Operand& object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(const Operand& object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
- cmpq(kScratchRegister, int32_register);
- Assert(above_equal, "32 bit value in register is not zero-extended");
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
+ if (emit_debug_code()) {
+ ASSERT(!int32_register.is(kScratchRegister));
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ cmpq(kScratchRegister, int32_register);
+ Check(above_equal, "32 bit value in register is not zero-extended");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- testb(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi and not a string");
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ ASSERT(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpq(src, kScratchRegister);
+ Check(equal, message);
+ }
}
@@ -3396,20 +3511,21 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
cmpq(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ isolate()->factory()->native_context_map());
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3417,23 +3533,24 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// compatible with the security token in the receiving global
// object.
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
- movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ movq(holder_reg,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, "JSGlobalProxy::context() should not be null.");
- // Read the first word and compare to global_context_map(),
+ // Read the first word and compare to native_context_map(),
movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg);
}
movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, token_offset));
@@ -3955,7 +4072,7 @@ void MacroAssembler::CopyBytes(Register destination,
int min_length,
Register scratch) {
ASSERT(min_length >= 0);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, "Invalid min_length");
}
@@ -4053,31 +4170,43 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ movq(scratch,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+ movq(scratch, Operand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+
+ int offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ cmpq(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ movq(map_in_out, FieldOperand(scratch, offset));
}
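
// A standalone sketch (not part of the patch) of the new lookup: the cached
// Array maps now live in a FixedArray in the native context, so a kind's map
// sits at kind * kPointerSize + FixedArrayBase::kHeaderSize. The header size
// of two pointers is an assumption for this sketch.
//
//   #include <cstdio>
//
//   int main() {
//     const int kPointerSize = 8;                // x64
//     const int kHeaderSize = 2 * kPointerSize;  // assumed FixedArray header
//     int kind = 2;                              // FAST_ELEMENTS
//     printf("%d\n", kind * kPointerSize + kHeaderSize);  // 32: map offset
//     return 0;
//   }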
void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
+ Register function_in, Register scratch,
+ Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
movq(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
+ ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ kind,
+ map_out,
+ scratch,
+ &done);
+ } else if (can_have_holes) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@@ -4093,10 +4222,11 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ movq(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
movq(function, Operand(function, Context::SlotOffset(index)));
}
@@ -4321,7 +4451,7 @@ void MacroAssembler::EnsureNotWhite(
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
@@ -4373,7 +4503,7 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
+ ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
addq(length, Immediate(0x04));
@@ -4396,44 +4526,38 @@ void MacroAssembler::EnsureNotWhite(
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
+ Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r9;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
movq(rcx, rax);
- bind(&next);
-
- // Check that there are no elements. Register rcx contains the
- // current JS object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- j(not_equal, call_runtime);
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(rdx, call_runtime);
- // Check that there is an enum cache in the non-empty instance
- // descriptors (rdx). This is the case if the next enumeration
- // index field does not contain a smi.
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(rdx, call_runtime);
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
+ bind(&next);
+
+ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmpq(rcx, rax);
- j(equal, &check_prototype, Label::kNear);
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmpq(rdx, empty_fixed_array_value);
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(0));
+ j(not_equal, call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register rcx contains the current JS
+ // object we've reached through the prototype chain.
+ cmpq(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
cmpq(rcx, null_value);
j(not_equal, &next);
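
// A standalone model (not part of the patch) of the reshaped loop: bail to
// the runtime if the receiver's enum cache is invalid, then walk the
// prototype chain requiring empty caches and no elements until null. The
// struct and the -1 sentinel for Map::kInvalidEnumCache are illustrative.
//
//   #include <cstdio>
//
//   struct Obj {
//     int enum_length;       // -1 models Map::kInvalidEnumCache
//     bool has_elements;
//     const Obj* prototype;  // nullptr ends the chain, like null_value
//   };
//
//   static bool CheckEnumCache(const Obj* receiver) {
//     if (receiver->enum_length < 0) return false;  // no cache: runtime
//     for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
//       if (o != receiver && o->enum_length != 0) return false;
//       if (o->has_elements) return false;          // elements: runtime
//     }
//     return true;  // fast for-in path is safe
//   }
//
//   int main() {
//     Obj proto = {0, false, nullptr};
//     Obj recv = {2, false, &proto};
//     printf("%d\n", CheckEnumCache(&recv));  // prints 1
//     return 0;
//   }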
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
index 5ec8873..fdddc13 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
@@ -317,6 +317,7 @@ class MacroAssembler: public Assembler {
void PopSafepointRegisters() { Popad(); }
// Store the value in register src in the safepoint register stack
// slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
@@ -774,6 +775,11 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -817,7 +823,7 @@ class MacroAssembler: public Assembler {
void Call(ExternalReference ext);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
// The size of the code generated for different call instructions.
int CallSize(Address destination, RelocInfo::Mode rmode) {
@@ -877,9 +883,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
+ void CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by index in
@@ -936,32 +942,45 @@ class MacroAssembler: public Assembler {
void ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg);
+ Register result_reg);
+
+ void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const int shift = Field::kShift + kSmiShift;
+ static const int mask = Field::kMask >> Field::kShift;
+ shr(reg, Immediate(shift));
+ and_(reg, Immediate(mask));
+ shl(reg, Immediate(kSmiShift));
+ }
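+
+  // A standalone model (not part of the patch) of DecodeField: shift the
+  // field down past the smi tag, mask it, then shift left by kSmiShift so
+  // the result is again a valid Smi. Field::kShift and kMask are
+  // hypothetical values for illustration.
+  //
+  //   #include <cstdint>
+  //   #include <cstdio>
+  //
+  //   int main() {
+  //     const int kSmiShift = 32;                 // x64 smi layout
+  //     const int kShift = 3;                     // hypothetical Field::kShift
+  //     const uint64_t kMask = uint64_t{0x7} << kShift;  // Field::kMask
+  //     uint64_t reg = (uint64_t{5} << kShift) << kSmiShift;  // field = 5
+  //     reg >>= (kShift + kSmiShift);             // shr: drop tag and shift
+  //     reg &= (kMask >> kShift);                 // and_: isolate the field
+  //     reg <<= kSmiShift;                        // shl: re-tag as a Smi
+  //     printf("%llu\n", (unsigned long long)(reg >> kSmiShift));  // 5
+  //     return 0;
+  //   }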
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
- void AbortIfNotSmi(const Operand& object);
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+ void AssertSmi(const Operand& object);
// Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits.
- void AbortIfNotZeroExtended(Register reg);
+ // have zeros in the top 32 bits, enabled via --debug-code.
+ void AssertZeroExtended(Register reg);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// Exception handling
@@ -1128,8 +1147,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the global context if the map in register
- // map_in_out is the cached Array map in the global context of
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@@ -1141,7 +1160,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
- Register map_out);
+ Register map_out,
+ bool can_have_holes);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -1154,7 +1174,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@@ -1322,6 +1342,8 @@ class MacroAssembler: public Assembler {
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
+ intptr_t RootRegisterDelta(ExternalReference other);
+
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
@@ -1441,12 +1463,12 @@ inline Operand ContextOperand(Register context, int index) {
inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_INDEX);
+ return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
}
static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(rsi, Context::QML_GLOBAL_INDEX);
+ return ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX);
}
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
index bf232bf..86f7bfe 100644
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,21 +44,23 @@ namespace internal {
/*
* This assembler uses the following register assignment convention
- * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - rdi : current position in input, as negative offset from end of string.
+ * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
+ * using LoadCurrentCharacter before using any of the dispatch methods.
+ * Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - rdi : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character
- * offset! Is always a 32-bit signed (negative) offset, but must be
+ * offset! Is always a 32-bit signed (negative) offset, but must be
* maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : end of input (points to byte after last character in input),
+ * - rsi : End of input (points to byte after last character in input),
* so that rsi+rdi points to the current character.
- * - rbp : frame pointer. Used to access arguments, local variables and
+ * - rbp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
- * - rsp : points to tip of C stack.
- * - rcx : points to tip of backtrack stack. The backtrack stack contains
- * only 32-bit values. Most are offsets from some base (e.g., character
+ * - rsp : Points to tip of C stack.
+ * - rcx : Points to tip of backtrack stack. The backtrack stack contains
+ * only 32-bit values. Most are offsets from some base (e.g., character
* positions from end of string or code location from Code* pointer).
- * - r8 : code object pointer. Used to convert between absolute and
+ * - r8 : Code object pointer. Used to convert between absolute and
* code-object-relative addresses.
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
@@ -72,20 +74,22 @@ namespace internal {
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
- * - Isolate* isolate (Address of the current isolate)
+ * - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
- * - stack_area_base (High end of the memory area to use as
+ * - stack_area_base (high end of the memory area to use as
* backtracking stack)
+ * - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
+ * - end of input (address of end of string)
+ * - start of input (address of first character in string)
* - start index (character index of start)
* - String* input_string (input string)
* - return address
* - backup of callee save registers (rbx, possibly rsi and rdi).
+ * - success counter (only useful for global regexp to count matches)
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * position -1). Used to initialize capture registers to a non-position.
* - At start of string (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
@@ -94,7 +98,7 @@ namespace internal {
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out uninitialized.
+ * character of the string).  The remaining registers start out uninitialized.
*
* The first seven values must be provided by the calling code by
* calling the code's entry address cast to a function pointer with the
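The signature referred to above gains a ninth argument in this patch: the size of the capture array. The authoritative typedef change is in simulator-x64.h further down; the sketch below only names the parameters so they can be mapped onto the stack slots listed above (parameter names are illustrative):

    // Entry-point shape after this patch; parameter names are illustrative.
    typedef int (*regexp_matcher)(String* input_string,
                                  int start_index,
                                  const byte* input_start,
                                  const byte* input_end,
                                  int* capture_array,
                                  int num_output_registers,  // new argument
                                  Address stack_area_base,
                                  int direct_call,
                                  Isolate* isolate);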
@@ -113,10 +117,12 @@ namespace internal {
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
Mode mode,
- int registers_to_save)
- : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(Isolate::Current(), NULL, kRegExpCodeSize),
no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4),
+ code_relative_fixup_positions_(4, zone),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
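Construction sites now have to thread a Zone through. A minimal sketch, assuming `capture_count` and `zone` are in scope and assuming the usual two-registers-per-capture convention (both are assumptions, not taken from this hunk):

    // Hedged sketch: constructing the assembler under the new signature.
    RegExpMacroAssemblerX64 macro_assembler(
        NativeRegExpMacroAssembler::ASCII,   // or UC16 for two-byte input
        (capture_count + 1) * 2,             // start/end register per capture
        zone);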
@@ -347,6 +353,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// In either case succeed immediately.
__ j(equal, &fallthrough);
+ // -----------------------
+ // rdx - Start of capture
+  // rbx - Length of capture
+ // Check that there are sufficient characters left in the input.
+ __ movl(rax, rdi);
+ __ addl(rax, rbx);
+ BranchOrBacktrack(greater, on_no_match);
+
if (mode_ == ASCII) {
Label loop_increment;
if (on_no_match == NULL) {
@@ -523,15 +537,6 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
-void RegExpMacroAssemblerX64::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
- __ movq(rax, register_location(reg1));
- __ cmpq(rax, register_location(reg2));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
Label* on_not_equal) {
__ cmpl(current_character(), Immediate(c));
@@ -744,13 +749,16 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerX64::Fail() {
- ASSERT(FAILURE == 0); // Return value for failure is zero.
- __ Set(rax, 0);
+ STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
+ if (!global()) {
+ __ Set(rax, FAILURE);
+ }
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
+ Label return_rax;
// Finalize code - write the entry point code now we know how many
// registers we need.
// Entry code:
@@ -784,7 +792,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
+ ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -795,7 +803,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Make room for "at start" constant.
+ __ push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -815,14 +824,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ Set(rax, EXCEPTION);
- __ jmp(&exit_label_);
+ __ jmp(&return_rax);
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &exit_label_);
+ __ j(not_zero, &return_rax);
__ bind(&stack_ok);
@@ -847,19 +856,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
- if (num_saved_registers_ > 0) {
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- __ Set(rcx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
- __ j(greater, &init_loop);
- }
+#ifdef WIN32
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -869,21 +866,49 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
+#endif // WIN32
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
- // Load previous char as initial value of current-character.
- Label at_start;
- __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
- __ j(equal, &at_start);
- LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
- __ jmp(&start_label_);
- __ bind(&at_start);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
__ Set(current_character(), '\n');
- __ jmp(&start_label_);
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) {
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ if (num_saved_registers_ > 8) {
+ __ Set(rcx, kRegisterZero);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ subq(rcx, Immediate(kPointerSize));
+ __ cmpq(rcx,
+ Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+ __ j(greater, &init_loop);
+ } else { // Unroll the loop.
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ movq(register_location(i), rax);
+ }
+ }
+ }
+ // Initialize backtrack stack pointer.
+ __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+
+ __ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@@ -902,6 +927,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in rdx for the zero-length check later.
+ __ movq(rdx, rax);
+ }
__ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
@@ -909,12 +938,57 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movl(Operand(rbx, i * kIntSize), rax);
}
}
- __ Set(rax, SUCCESS);
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ // Increment success counter.
+ __ incq(Operand(rbp, kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
+ __ subq(rcx, Immediate(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ j(less, &exit_label_);
+
+ __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ // Advance the location for output.
+ __ addq(Operand(rbp, kRegisterOutput),
+ Immediate(num_saved_registers_ * kIntSize));
+
+ // Prepare rax to initialize registers with its value in the next run.
+ __ movq(rax, Operand(rbp, kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // rdx: capture start index
+ __ cmpq(rdi, rdx);
+ // Not a zero-length match, restart.
+ __ j(not_equal, &load_char_start_regexp);
+ // rdi (offset from the end) is zero if we already reached the end.
+ __ testq(rdi, rdi);
+ __ j(zero, &exit_label_, Label::kNear);
+ // Advance current position after a zero-length match.
+ if (mode_ == UC16) {
+ __ addq(rdi, Immediate(2));
+ } else {
+ __ incq(rdi);
+ }
+ }
+
+ __ jmp(&load_char_start_regexp);
+ } else {
+ __ movq(rax, Immediate(SUCCESS));
+ }
}
- // Exit and return rax
__ bind(&exit_label_);
+ if (global()) {
+ // Return the number of successful captures.
+ __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ }
+ __ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
@@ -951,7 +1025,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ testq(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
- __ j(not_zero, &exit_label_);
+ __ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -1012,7 +1086,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ Set(rax, EXCEPTION);
- __ jmp(&exit_label_);
+ __ jmp(&return_rax);
}
FixupCodeRelativePositions();
@@ -1135,8 +1209,9 @@ void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
}
-void RegExpMacroAssemblerX64::Succeed() {
+bool RegExpMacroAssemblerX64::Succeed() {
__ jmp(&success_label_);
+ return global();
}
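Fail(), Succeed() and the new exit paths combine into a match loop for global regexps: the generated code re-enters at load_char_start_regexp until the output array cannot hold another set of captures, then returns the number of matches instead of SUCCESS/FAILURE. A hedged pseudo-C sketch of the behavior the emitted code implements (all helper names are illustrative, not V8 API):

    int matches = 0;
    int registers_left = num_output_registers;
    while (registers_left >= num_saved_registers) {
      if (!MatchOnceFrom(current_position)) break;     // hypothetical helper
      StoreCapturesTo(output);                         // hypothetical helper
      output += num_saved_registers;
      registers_left -= num_saved_registers;
      matches++;
      if (LastMatchWasZeroLength()) {                  // hypothetical helper
        if (AtEndOfInput()) break;
        current_position += (mode == UC16) ? 2 : 1;    // step one character
      }
    }
    return matches;  // non-global code returns SUCCESS or FAILURE instead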
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
index cd24b60..a082cf2 100644
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,7 +41,7 @@ namespace internal {
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
+ RegExpMacroAssemblerX64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerX64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@@ -66,7 +66,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
- virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
@@ -109,7 +108,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
- virtual void Succeed();
+ virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -154,7 +153,12 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+  // For the case of a global regular expression, we have room to store at
+  // least one set of capture results.  For the case of a non-global regexp,
+  // we ignore this value.  NumOutputRegisters is passed as a 32-bit value.
+  // The upper 32 bits of this 64-bit stack slot may contain garbage.
+ static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -167,8 +171,12 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kStackHighEnd = kRegisterOutput - kPointerSize;
- static const int kDirectCall = kFrameAlign;
+  // For the case of a global regular expression, we have room to store at
+  // least one set of capture results.  For the case of a non-global regexp,
+  // we ignore this value.
+ static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
+ static const int kStackHighEnd = kFrameAlign;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
#endif
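As a consistency check: on the AMD64-ABI branch these slots descend from the frame pointer, so with kPointerSize == 8 the new constant works out to

    kRegisterOutput     == -5 * 8 == -40
    kNumOutputRegisters == kRegisterOutput - kPointerSize == -48 == -6 * 8

which matches the ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize) added in GetCode above.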
@@ -183,14 +191,14 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kStackHighEnd - kPointerSize;
+ static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
+ static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne =
- kLastCalleeSaveRegister - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
@@ -232,7 +240,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset());
+ code_relative_fixup_positions_.Add(masm_.pc_offset(), zone());
}
void FixupCodeRelativePositions();
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.h b/src/3rdparty/v8/src/x64/simulator-x64.h
index df8423a..8aba701 100644
--- a/src/3rdparty/v8/src/x64/simulator-x64.h
+++ b/src/3rdparty/v8/src/x64/simulator-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,12 +40,12 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int, Isolate*);
+ const byte*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect nine int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
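A hedged example of a call through the updated macro; p5 now carries the output-register count, and every identifier here is an illustrative placeholder rather than code from this patch:

    int result = CALL_GENERATED_REGEXP_CODE(
        code_entry,
        input_string, start_index, input_start, input_end,  // p0..p3
        output_registers, output_register_count,            // p4, p5 (new)
        stack_area_base, direct_call_flag, isolate);         // p6..p8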
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
index 5721e9b..b120efb 100644
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
@@ -228,15 +228,15 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
Register prototype) {
// Load the global or builtins object from the current context.
__ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
__ movq(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ FieldOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
__ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -249,13 +249,13 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ Move(prototype, isolate->global());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+ __ Move(prototype, isolate->global_object());
+ __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -731,10 +731,22 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+  // go into the slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
@@ -743,7 +755,32 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+    // We need an extra register; preserve name_reg by pushing it.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -754,11 +791,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch); // Return address.
+ __ pop(scratch1); // Return address.
__ push(receiver_reg);
__ Push(transition);
__ push(rax);
- __ push(scratch);
+ __ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -768,9 +805,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
+ // Update the map of the object.
+ __ Move(scratch1, transition);
+ __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -787,19 +834,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch, offset), rax);
+ __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch1, offset), rax);
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
__ RecordWriteField(
- scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
// Return the value (register rax).
@@ -982,6 +1029,49 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ movq(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch3;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(scratch2,
+ Operand(dictionary, index, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(scratch3, callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(scratch2, scratch3);
+ __ j(not_equal, miss);
+}
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -989,6 +1079,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -999,6 +1090,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch2.is(reg));
__ pop(scratch2); // Get return address to place it below.
@@ -1096,12 +1192,13 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -1173,7 +1270,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), rax, holder_reg,
@@ -1343,7 +1440,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1434,17 +1531,32 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiOnlyElements(rbx, &call_builtin);
+ __ CheckFastSmiElements(rbx, &call_builtin);
// rdx: receiver
// rbx: map
- __ movq(r9, rdi); // Backup rdi as it is going to be trashed.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+
+ Label try_holey_map;
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
+ &try_holey_map);
+
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+    // Restore rdi.
+ __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ __ jmp(&fast_object);
+
+ __ bind(&try_holey_map);
+ __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ rbx,
+ rdi,
&call_builtin);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
- __ movq(rdi, r9);
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+ __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(rbx, &call_builtin);
@@ -1852,7 +1964,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -1967,7 +2079,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2216,7 +2328,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2279,7 +2391,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2296,7 +2408,13 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
Label miss;
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ rdx, rcx, rbx, rdi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2304,14 +2422,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2319,19 +2440,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ pop(rbx); // remove the return address
__ push(rdx); // receiver
@@ -2351,7 +2465,81 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(rax);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ push(rdx);
+ __ push(rax);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(rax);
+
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rdx, &miss);
+ CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
+
+ __ bind(&miss);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2397,7 +2585,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2445,7 +2633,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2469,7 +2657,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ j(not_equal, &miss);
// Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ rdx, rcx, rbx, rdi,
+ &miss);
// Handle store cache miss.
__ bind(&miss);
@@ -2478,7 +2672,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -2502,7 +2698,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -2540,7 +2736,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -2578,7 +2774,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2598,7 +2794,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2613,13 +2809,76 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+ GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, r8, callback,
name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(rax);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(rax, &miss);
+ CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss);
+
+  GenerateLoadViaGetter(masm(), getter);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2639,7 +2898,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2663,7 +2922,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2707,7 +2966,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2736,7 +2995,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2758,14 +3017,14 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ Cmp(rax, name);
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+ GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, r8, callback,
name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2795,7 +3054,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2825,7 +3084,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2851,7 +3110,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2877,7 +3136,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2903,7 +3162,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2923,7 +3182,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -2951,7 +3210,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -2981,6 +3240,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#endif
// Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
@@ -2990,18 +3250,22 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#ifdef DEBUG
// Cannot construct functions this way.
- // rdi: constructor
// rbx: initial map
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Assert(not_equal, "Function constructed by construct stub.");
+ __ Check(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject in new space.
- // rdi: constructor
// rbx: initial map
+ ASSERT(function->has_initial_map());
+ int instance_size = function->initial_map()->instance_size();
+#ifdef DEBUG
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+ __ cmpq(rcx, Immediate(instance_size));
+ __ Check(equal, "Instance size of initial map changed.");
+#endif
+ __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
&generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
@@ -3047,7 +3311,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3369,8 +3632,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3435,8 +3701,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3587,7 +3856,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(rax, &transition_elements_kind);
}
@@ -3611,13 +3880,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ SmiToInteger32(rcx, rcx);
__ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
} else {
// Do the store and update the write barrier.
- ASSERT(elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastObjectElementsKind(elements_kind));
__ SmiToInteger32(rcx, rcx);
__ lea(rcx,
FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
diff --git a/src/3rdparty/v8/src/zone-inl.h b/src/3rdparty/v8/src/zone-inl.h
index ee96ec0..e312b20 100644
--- a/src/3rdparty/v8/src/zone-inl.h
+++ b/src/3rdparty/v8/src/zone-inl.h
@@ -40,7 +40,7 @@ namespace internal {
inline void* Zone::New(int size) {
- ASSERT(ZoneScope::nesting() > 0);
+ ASSERT(scope_nesting_ > 0);
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -90,30 +90,17 @@ ZoneSplayTree<Config>::~ZoneSplayTree() {
// Reset the root to avoid unneeded iteration over all tree nodes
// in the destructor. For a zone-allocated tree, nodes will be
// freed by the Zone.
- SplayTree<Config, ZoneListAllocationPolicy>::ResetRoot();
+ SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
}
-// TODO(isolates): for performance reasons, this should be replaced with a new
-// operator that takes the zone in which the object should be
-// allocated.
-void* ZoneObject::operator new(size_t size) {
- return ZONE->New(static_cast<int>(size));
-}
-
void* ZoneObject::operator new(size_t size, Zone* zone) {
return zone->New(static_cast<int>(size));
}
-
-inline void* ZoneListAllocationPolicy::New(int size) {
- return ZONE->New(size);
-}
-
-
-template <typename T>
-void* ZoneList<T>::operator new(size_t size) {
- return ZONE->New(static_cast<int>(size));
+inline void* ZoneAllocationPolicy::New(size_t size) {
+ ASSERT(zone_);
+ return zone_->New(static_cast<int>(size));
}
@@ -123,19 +110,14 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
-ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
- : isolate_(isolate), mode_(mode) {
- isolate_->zone()->scope_nesting_++;
+ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
+ : zone_(zone), mode_(mode) {
+ zone_->scope_nesting_++;
}
bool ZoneScope::ShouldDeleteOnExit() {
- return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
-int ZoneScope::nesting() {
- return Isolate::Current()->zone()->scope_nesting_;
+ return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
}
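With ZoneScope now bound to a Zone rather than an Isolate, a minimal usage sketch looks like the following (`isolate` is assumed to be in scope; DELETE_ON_EXIT is the mode checked by ShouldDeleteOnExit above):

    Zone zone(isolate);
    {
      ZoneScope scope(&zone, DELETE_ON_EXIT);
      void* block = zone.New(64);   // zone memory is freed in bulk only
      // ... use block ...
    }  // outermost DELETE_ON_EXIT scope runs zone.DeleteAll()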
diff --git a/src/3rdparty/v8/src/zone.cc b/src/3rdparty/v8/src/zone.cc
index d5d05ab..51b8113 100644
--- a/src/3rdparty/v8/src/zone.cc
+++ b/src/3rdparty/v8/src/zone.cc
@@ -67,20 +67,20 @@ class Segment {
};
-Zone::Zone()
+Zone::Zone(Isolate* isolate)
: zone_excess_limit_(256 * MB),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
scope_nesting_(0),
- segment_head_(NULL) {
+ segment_head_(NULL),
+ isolate_(isolate) {
}
unsigned Zone::allocation_size_ = 0;
ZoneScope::~ZoneScope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
- isolate_->zone()->scope_nesting_--;
+ if (ShouldDeleteOnExit()) zone_->DeleteAll();
+ zone_->scope_nesting_--;
}
diff --git a/src/3rdparty/v8/src/zone.h b/src/3rdparty/v8/src/zone.h
index 8648465..01e887e 100644
--- a/src/3rdparty/v8/src/zone.h
+++ b/src/3rdparty/v8/src/zone.h
@@ -64,6 +64,8 @@ class Isolate;
class Zone {
public:
+ explicit Zone(Isolate* isolate);
+ ~Zone() { DeleteKeptSegment(); }
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
// allocating new segments of memory on demand using malloc().
inline void* New(int size);
@@ -114,9 +116,6 @@ class Zone {
// the zone.
int segment_bytes_allocated_;
- // Each isolate gets its own zone.
- Zone();
-
// Expand the Zone to hold at least 'size' more bytes and allocate
// the bytes. Returns the address of the newly allocated chunk of
// memory in the Zone. Should only be called if there isn't enough
@@ -148,7 +147,6 @@ class Zone {
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- INLINE(void* operator new(size_t size));
INLINE(void* operator new(size_t size, Zone* zone));
// Ideally, the delete operator should be private instead of
@@ -164,16 +162,16 @@ class ZoneObject {
};
-// The ZoneListAllocationPolicy is used to specialize the GenericList
-// implementation to allocate ZoneLists and their elements in the
-// Zone.
-class ZoneListAllocationPolicy {
+// The ZoneAllocationPolicy is used to specialize generic data
+// structures to allocate themselves and their elements in the Zone.
+struct ZoneAllocationPolicy {
public:
- // Allocate 'size' bytes of memory in the zone.
- static void* New(int size);
+ explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
+ INLINE(void* New(size_t size));
+  INLINE(static void Delete(void* pointer)) { }
- // De-allocation attempts are silently ignored.
- static void Delete(void* p) { }
+ private:
+ Zone* zone_;
};
@@ -182,20 +180,48 @@ class ZoneListAllocationPolicy {
// Zone. ZoneLists cannot be deleted individually; you can delete all
// objects in the Zone by calling Zone::DeleteAll().
template<typename T>
-class ZoneList: public List<T, ZoneListAllocationPolicy> {
+class ZoneList: public List<T, ZoneAllocationPolicy> {
public:
- INLINE(void* operator new(size_t size));
- INLINE(void* operator new(size_t size, Zone* zone));
-
// Construct a new ZoneList with the given capacity; the length is
// always zero. The capacity must be non-negative.
- explicit ZoneList(int capacity)
- : List<T, ZoneListAllocationPolicy>(capacity) { }
+ ZoneList(int capacity, Zone* zone)
+ : List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) { }
+
+ INLINE(void* operator new(size_t size, Zone* zone));
// Construct a new ZoneList by copying the elements of the given ZoneList.
- explicit ZoneList(const ZoneList<T>& other)
- : List<T, ZoneListAllocationPolicy>(other.length()) {
- AddAll(other);
+ ZoneList(const ZoneList<T>& other, Zone* zone)
+ : List<T, ZoneAllocationPolicy>(other.length(),
+ ZoneAllocationPolicy(zone)) {
+ AddAll(other, ZoneAllocationPolicy(zone));
+ }
+
+ // We add some convenience wrappers so that we can pass in a Zone
+ // instead of a (less convenient) ZoneAllocationPolicy.
+ INLINE(void Add(const T& element, Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
+ }
+ INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
+ Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
+ }
+ INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
+ }
+ INLINE(void InsertAt(int index, const T& element, Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::InsertAt(index, element,
+ ZoneAllocationPolicy(zone));
+ }
+ INLINE(Vector<T> AddBlock(T value, int count, Zone* zone)) {
+ return List<T, ZoneAllocationPolicy>::AddBlock(value, count,
+ ZoneAllocationPolicy(zone));
+ }
+ INLINE(void Allocate(int length, Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::Allocate(length, ZoneAllocationPolicy(zone));
+ }
+ INLINE(void Initialize(int capacity, Zone* zone)) {
+ List<T, ZoneAllocationPolicy>::Initialize(capacity,
+ ZoneAllocationPolicy(zone));
}
void operator delete(void* pointer) { UNREACHABLE(); }
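The wrappers above let call sites keep passing a plain Zone*; a short sketch of the resulting usage, built only from the constructors and operators declared in this hunk (`isolate` is assumed to be in scope):

    Zone zone(isolate);
    ZoneList<int>* list = new(&zone) ZoneList<int>(4, &zone);
    list->Add(1, &zone);                  // Zone is wrapped into a
    list->Add(2, &zone);                  // ZoneAllocationPolicy internally
    ZoneList<int> copy(*list, &zone);     // copy-construct in the same zone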
@@ -208,7 +234,7 @@ class ZoneList: public List<T, ZoneListAllocationPolicy> {
// outer-most scope.
class ZoneScope BASE_EMBEDDED {
public:
- INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
+ INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
virtual ~ZoneScope();
@@ -223,7 +249,7 @@ class ZoneScope BASE_EMBEDDED {
inline static int nesting();
private:
- Isolate* isolate_;
+ Zone* zone_;
ZoneScopeMode mode_;
};
@@ -232,15 +258,15 @@ class ZoneScope BASE_EMBEDDED {
// different configurations of a concrete splay tree (see splay-tree.h).
// The tree itself and all its elements are allocated in the Zone.
template <typename Config>
-class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
+class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
public:
- ZoneSplayTree()
- : SplayTree<Config, ZoneListAllocationPolicy>() {}
+ explicit ZoneSplayTree(Zone* zone)
+ : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
~ZoneSplayTree();
};
-typedef TemplateHashMapImpl<ZoneListAllocationPolicy> ZoneHashMap;
+typedef TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
} } // namespace v8::internal