author    Peter Varga <pvarga@inf.u-szeged.hu>    2012-04-17 11:03:39 +0200
committer Qt by Nokia <qt-info@nokia.com>         2012-04-26 08:55:55 +0200
commit    227e72d1bba70d518639996aab4146b060507cd6 (patch)
tree      723564e760e94a7d205a3d3300c44192e1d2f2e1 /src/3rdparty/v8/src/arm/stub-cache-arm.cc
parent    5b1fcb16da41f711f27f2e8ff84de8b70a30b645 (diff)
Updated V8 from git://github.com/v8/v8.git to 57f8959fb264354ba1a2e5118db512f588917061
Update V8 source to version 3.10.1.

* Added optimizations and stability improvements on all platforms.
* Various performance improvements.
* Cleanup ScopeInfo and SerializedScopeInfo.
* Introduce extended mode.
* Implemented performance improvements to the incremental garbage collector.
* Fixed handling of arrays in DefineOwnProperty. (issue 1756)
* Fixed GCC 4.7 warnings.
* Performance improvements for large Smi-only arrays.
* Reduce the space used by the stack for the profiling thread.
* Reduced memory use immediately after starting V8.
* Fixed VFP detection through compiler defines. (issue 1996)
* Remove static initializers in v8. (issue 1859)
* Optimized boot-up memory use.
* Optimized regular expressions.

Change-Id: I2dad3092612de279179950dae4dd43daf0463a9f
Reviewed-by: Kent Hansen <kent.hansen@nokia.com>
Diffstat (limited to 'src/3rdparty/v8/src/arm/stub-cache-arm.cc')
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc  506
1 file changed, 357 insertions(+), 149 deletions(-)
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
index bf6f085..cfd93bc 100644
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,47 +43,83 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
+ Register receiver,
Register name,
+ // Number of the cache entry, not scaled.
Register offset,
Register scratch,
- Register scratch2) {
+ Register scratch2,
+ Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+ ASSERT(map_off_addr > key_off_addr);
+ ASSERT((map_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
- Register offsets_base_addr = scratch;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+ __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
// Check that the key in the entry matches the name.
- __ mov(offsets_base_addr, Operand(key_offset));
- __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
+ __ ldr(ip, MemOperand(base_addr, 0));
__ cmp(name, ip);
__ b(ne, &miss);
+ // Check the map matches.
+ __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(ip, scratch2);
+ __ b(ne, &miss);
+
// Get the code entry from the cache.
- __ add(offsets_base_addr, offsets_base_addr,
- Operand(value_off_addr - key_off_addr));
- __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
- __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
- __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
- __ cmp(scratch2, Operand(flags));
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ // It's a nice optimization if this constant is encodable in the bic insn.
+
+ uint32_t mask = Code::kFlagsNotUsedInLookup;
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
+ __ bic(flags_reg, flags_reg, Operand(mask));
+ // Using cmn and the negative instead of cmp means we can use movw.
+ if (flags < 0) {
+ __ cmn(flags_reg, Operand(-flags));
+ } else {
+ __ cmp(flags_reg, Operand(flags));
+ }
__ b(ne, &miss);
- // Re-load code entry from cache.
- __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
// Jump to the first instruction in the code stub.
- __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(offset);
+ __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
// Miss: fall through.
__ bind(&miss);
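
The hunk above changes a stub cache entry from two words (name, code) to three (name, code, map): the probe now scales the hashed index by three, compares the receiver's map as well as the name, and masks the code flags before comparing them. A minimal C++ sketch of the same lookup follows; the stand-in types and the kFlagsNotUsedInLookup value are assumptions for illustration, not V8's actual declarations.

    #include <cstdint>

    // Simplified, hypothetical stand-ins for V8's Name/Map/Code types.
    struct Map {};
    struct Name {};
    struct Code { uint32_t flags; };

    constexpr uint32_t kFlagsNotUsedInLookup = 0x3;  // assumed mask, not V8's real value

    // One stub cache entry is now three words: name, code and map.
    struct Entry {
      Name* key;
      Code* value;
      Map* map;
    };

    // Mirrors what the generated ARM code does per probe: the hashed index is
    // scaled by 3 (add offset, offset LSL 1), then name, map and flags are checked.
    Code* Probe(Entry* table, uint32_t index, Name* name, Map* receiver_map,
                uint32_t flags) {
      Entry* entry = &table[index];                    // base_addr = table + index * 3 words
      if (entry->key != name) return nullptr;          // name mismatch: miss
      if (entry->map != receiver_map) return nullptr;  // map mismatch: miss
      Code* code = entry->value;
      if ((code->flags & ~kFlagsNotUsedInLookup) != flags) return nullptr;
      return code;                                     // caller jumps past the Code header
    }
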
@@ -155,13 +191,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
- Register extra2) {
+ Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 8.
- ASSERT(sizeof(Entry) == 8);
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ ASSERT(sizeof(Entry) == 12);
// Make sure the flags does not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -181,6 +218,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@@ -189,27 +231,51 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
- __ eor(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
+ // Mask down the eor argument to the minimum to keep the immediate
+ // ARM-encodable.
+ __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+ // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+ __ and_(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kPrimary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name));
- __ add(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+ __ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kSecondary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
}
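
The rewritten GenerateProbe shifts the heap-object tag bits out of the hash (they are always 01 for maps) and pre-masks the flags so the eor/add immediates stay ARM-encodable. A hedged C++ sketch of the primary and secondary index computation; the table sizes used here are assumptions, not V8's actual constants.

    #include <cstdint>

    constexpr uint32_t kHeapObjectTagSize = 2;
    constexpr uint32_t kPrimaryTableSize = 2048;   // assumed size
    constexpr uint32_t kSecondaryTableSize = 512;  // assumed size

    // Primary probe: hash field plus map address, tag bits shifted out, then
    // xor with the pre-masked flags and mask down to the table size.
    uint32_t PrimaryIndex(uint32_t hash_field, uint32_t map_addr, uint32_t flags) {
      uint32_t scratch = (hash_field + map_addr) >> kHeapObjectTagSize;
      scratch ^= (flags >> kHeapObjectTagSize) & (kPrimaryTableSize - 1);
      return scratch & (kPrimaryTableSize - 1);
    }

    // Secondary probe: subtract the untagged name address from the primary
    // index, add the pre-masked flags and mask again.
    uint32_t SecondaryIndex(uint32_t primary, uint32_t name_addr, uint32_t flags) {
      uint32_t scratch = primary - (name_addr >> kHeapObjectTagSize);
      scratch += (flags >> kHeapObjectTagSize) & (kSecondaryTableSize - 1);
      return scratch & (kSecondaryTableSize - 1);
    }
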
@@ -376,13 +442,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// r0 : value
Label exit;
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
- // Check that the map of the receiver hasn't changed.
- __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(object->map())));
- __ b(ne, miss_label);
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -566,16 +628,16 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee js function
+ // -- sp[4] : callee JS function
// -- sp[8] : call data
- // -- sp[12] : last js argument
+ // -- sp[12] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(r5, Operand(function));
+ __ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -587,7 +649,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
- // Store js function and call data.
+ // Store JS function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
// r2 points to call data as expected by Arguments
@@ -742,7 +804,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, call_kind);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -1019,10 +1081,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Handle<Map> current_map(current->map());
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(current_map));
- // Branch on the result of the map check.
- __ b(ne, miss);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1053,9 +1114,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- __ b(ne, miss);
+ __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1099,7 +1159,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1110,7 +1170,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(r0, Operand(value));
+ __ LoadHeapObject(r0, value);
__ Ret();
}
@@ -1150,7 +1210,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object **args_) as the data.
+ // scratch2 (internal::Object** args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
@@ -1185,7 +1245,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -1327,14 +1387,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(r0, miss);
CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}
@@ -1451,28 +1505,30 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Ret();
} else {
Label call_builtin;
- Register elements = r3;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements;
+ Register elements = r6;
+ Register end_elements = r5;
+ // Get the elements array of the object.
+ __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
- // Get the element's length.
+ // Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
@@ -1487,7 +1543,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Push the element.
+ // Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1502,13 +1558,33 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&with_write_barrier);
- __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r6, r6, &call_builtin);
+ __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ Label fast_object, not_fast_object;
+ __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ jmp(&fast_object);
+ // In case of fast smi-only, convert to fast object, otherwise bail out.
+ __ bind(&not_fast_object);
+ __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+ // edx: receiver
+ // r3: map
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r3,
+ r7,
+ &call_builtin);
+ __ mov(r2, receiver);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ __ bind(&fast_object);
+ } else {
+ __ CheckFastObjectElements(r3, r3, &call_builtin);
+ }
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Push the element.
+ // Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1554,25 +1630,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r6, MemOperand(r7));
- __ cmp(end_elements, r6);
+ __ ldr(r3, MemOperand(r7));
+ __ cmp(end_elements, r3);
__ b(ne, &call_builtin);
__ mov(r9, Operand(new_space_allocation_limit));
__ ldr(r9, MemOperand(r9));
- __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r6, r9);
+ __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
+ __ cmp(r3, r9);
__ b(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r6, MemOperand(r7));
+ __ str(r3, MemOperand(r7));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r6, MemOperand(end_elements, i * kPointerSize));
+ __ str(r3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
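
The Array.push grow path above relies on a bump-allocator trick: if the elements array ends exactly at the new-space allocation top, the stub can claim kAllocationDelta extra slots by moving the top pointer instead of copying the backing store. A rough C++ sketch of that check under assumed names (NewSpace and kAllocationDelta are placeholders, not V8's interface):

    #include <cstddef>

    constexpr size_t kAllocationDelta = 4;  // assumed number of extra slots claimed

    struct NewSpace {
      void** top;    // current allocation top
      void** limit;  // allocation limit
    };

    // Returns true if the backing store ending at end_elements could be grown
    // in place; the caller then stores the pushed value into the first new slot.
    bool TryGrowInPlace(NewSpace& space, void** end_elements, void* the_hole) {
      if (end_elements != space.top) return false;  // something was allocated after us
      if (space.top + kAllocationDelta > space.limit) return false;  // new space is full
      space.top += kAllocationDelta;                // update new_space_allocation_top
      for (size_t i = 1; i < kAllocationDelta; ++i) {
        end_elements[i] = the_hole;                 // fill the rest with holes
      }
      return true;
    }
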
@@ -1654,7 +1730,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@@ -1662,7 +1738,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
- __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1);
__ Ret();
@@ -1727,7 +1803,6 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Register receiver = r1;
Register index = r4;
- Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1738,7 +1813,6 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
StringCharCodeAtGenerator generator(receiver,
index,
- scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1809,8 +1883,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Register receiver = r0;
Register index = r4;
- Register scratch1 = r1;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1821,8 +1894,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
StringCharAtGenerator generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1914,7 +1986,8 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -1993,7 +2066,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ vmrs(r3);
// Set custom FPCSR:
// - Set rounding mode to "Round towards Minus Infinity"
- // (ie bits [23:22] = 0b10).
+ // (i.e. bits [23:22] = 0b10).
// - Clear vfp cumulative exception flags (bits [3:0]).
// - Make sure Flush-to-zero mode control bit is unset (bit 22).
__ bic(r9, r3,
@@ -2059,7 +2132,8 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2157,7 +2231,8 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2269,7 +2344,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || function->shared()->strict_mode()) {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, &miss);
@@ -2287,7 +2362,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case NUMBER_CHECK:
- if (function->IsBuiltin() || function->shared()->strict_mode()) {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a smi or a heap number.
__ JumpIfSmi(r1, &fast);
@@ -2308,7 +2383,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case BOOLEAN_CHECK:
- if (function->IsBuiltin() || function->shared()->strict_mode()) {
+ if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
Label fast;
// Check that the object is a boolean.
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2335,7 +2410,8 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
@@ -2415,7 +2491,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
- // Setup the context (function already in r1).
+ // Set up the context (function already in r1).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2476,13 +2552,9 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(r1, &miss);
-
// Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
+ __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2524,13 +2596,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(r1, &miss);
-
// Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
+ __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2591,15 +2659,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-
- __ mov(r1, r0);
- __ RecordWriteField(r4,
- JSGlobalPropertyCell::kValueOffset,
- r1,
- r2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET);
+ // Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2694,7 +2754,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2747,14 +2807,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, &miss);
- }
-
// Check that the map of the global has not changed.
+ __ JumpIfSmi(r0, &miss);
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
@@ -2834,7 +2888,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3046,7 +3100,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -4091,7 +4145,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4100,13 +4155,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
+ Register scratch = r4;
+ Register elements_reg = r3;
+ Register length_reg = r5;
+ Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4114,16 +4172,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ }
// Check that the key is within bounds.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
@@ -4131,10 +4186,21 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
}
// Compare smis.
__ cmp(key_reg, scratch);
- __ b(hs, &miss_force_generic);
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+
+ // Make sure elements is a fast element array, not 'cow'.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4172,12 +4238,80 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+ TAG_OBJECT);
+
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+ }
+
+ // Store the element at index zero.
+ __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ Ret();
+
+ __ bind(&check_capacity);
+ // Check for cow elements, in general they are not handled by this stub
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedCOWArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(length_reg, scratch);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4187,7 +4321,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
@@ -4197,6 +4332,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
+ Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4215,8 +4351,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
- __ b(hs, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ __ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
@@ -4237,6 +4378,73 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(value_reg, &value_is_smi);
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+ TAG_OBJECT);
+
+ // Initialize the new FixedDoubleArray. Leave elements unitialized for
+ // efficiency, they are guaranteed to be initialized before use.
+ __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch1,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // Make sure that the backing store can hold additional elements.
+ __ ldr(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ cmp(length_reg, scratch1);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
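
Both keyed-store fast paths above gain the same ALLOW_JSARRAY_GROWTH behaviour: a store exactly one past the current length grows the array by a single element, preallocating a small backing store for an empty array and deferring anything else to the runtime. A simplified C++ sketch of that policy; JsArray, the preallocation size and the boolean return convention are illustrative assumptions only.

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kPreallocatedArrayElements = 4;  // assumed preallocation size

    struct JsArray {
      uint32_t length = 0;
      std::vector<double> elements;  // stands in for the FixedDoubleArray backing store
    };

    // Returns true if the store was handled inline; false means fall back to
    // the slow path (the stub jumps to KeyedStoreIC_Slow in that case).
    bool StoreWithGrowth(JsArray& a, uint32_t key, double value) {
      if (key != a.length) return false;  // only grow by exactly one element
      if (a.elements.empty()) {
        a.elements.resize(kPreallocatedArrayElements);  // empty array: preallocate
      } else if (a.length >= a.elements.size()) {
        return false;  // capacity exhausted: runtime must reallocate
      }
      a.elements[key] = value;
      a.length += 1;
      return true;
    }
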