Diffstat (limited to 'src/3rdparty/v8/src/arm/stub-cache-arm.cc')
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc  555
1 file changed, 417 insertions(+), 138 deletions(-)
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
index 49c0982..9fc39d4 100644
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
@@ -283,11 +283,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
__ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
__ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ ldr(prototype,
@@ -304,13 +305,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ Move(ip, isolate->global());
+ __ ldr(prototype,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ Move(ip, isolate->global_object());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
- JSFunction::cast(isolate->global_context()->get(index)));
+ JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -435,22 +437,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
+ Handle<String> name,
Register receiver_reg,
Register name_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
+ LookupResult lookup(masm->isolate());
+ object->Lookup(*name, &lookup);
+ if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
+ // In sloppy mode, we could just return the value and be done. However, we
+ // might be in strict mode, where we have to throw. Since we cannot tell,
+ // go into slow case unconditionally.
+ __ jmp(miss_label);
+ return;
+ }
+
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Check that we are allowed to write this.
+ if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ JSObject* holder;
+ if (lookup.IsFound()) {
+ holder = lookup.holder();
+ } else {
+ // Find the top object.
+ holder = *object;
+ do {
+ holder = JSObject::cast(holder->GetPrototype());
+ } while (holder->GetPrototype()->IsJSObject());
+ }
+ // We need an extra register; preserve name_reg on the stack so it can
+ // serve as a scratch register for CheckPrototypes below.
+ __ push(name_reg);
+ Label miss_pop, done_check;
+ CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
+ scratch1, scratch2, name, &miss_pop);
+ __ jmp(&done_check);
+ __ bind(&miss_pop);
+ __ pop(name_reg);
+ __ jmp(miss_label);
+ __ bind(&done_check);
+ __ pop(name_reg);
}
// Stub never generated for non-global objects that require access
@@ -473,10 +512,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ mov(ip, Operand(transition));
- __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ // Update the map of the object.
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@@ -498,15 +547,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
offset,
name_reg,
- scratch,
+ scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch, offset));
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(r0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(r0, &exit);
@@ -514,7 +564,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0);
- __ RecordWriteField(scratch,
+ __ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@@ -938,8 +988,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
@@ -1182,6 +1232,45 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
+void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Handle<AccessorInfo> callback,
+ Handle<String> name,
+ Label* miss) {
+ ASSERT(!receiver.is(scratch1));
+ ASSERT(!receiver.is(scratch2));
+ ASSERT(!receiver.is(scratch3));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch1;
+ __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ miss,
+ &probe_done,
+ dictionary,
+ name_reg,
+ scratch2,
+ scratch3);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ cmp(scratch2, Operand(callback));
+ __ b(ne, miss);
+}
+
+
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@@ -1189,6 +1278,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
+ Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@@ -1199,6 +1289,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ GenerateDictionaryLoadCallback(
+ reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ }
+
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
@@ -1255,12 +1350,13 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
- compile_followup_inline =
- AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
}
}
@@ -1328,7 +1424,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
- if (lookup->type() == FIELD) {
+ if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
@@ -1477,7 +1573,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -1581,16 +1677,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r7, &call_builtin);
// r1: receiver
// r3: map
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ Label try_holey_map;
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
+ &try_holey_map);
+ __ mov(r2, receiver);
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
+ __ jmp(&fast_object);
+
+ __ bind(&try_holey_map);
+ __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ r3,
+ r7,
&call_builtin);
__ mov(r2, receiver);
- ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ ElementsTransitionGenerator::
+ GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -2009,7 +2118,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2027,11 +2136,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP3)) {
+ if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
- CpuFeatures::Scope scope_vfp3(VFP3);
+ CpuFeatures::Scope scope_vfp2(VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2155,7 +2264,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2254,7 +2363,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+ return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@@ -2471,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2529,7 +2638,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2545,20 +2654,29 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// -----------------------------------
Label miss;
- GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r1, r2, r3, r4,
+ &miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<AccessorInfo> callback,
- Handle<String> name) {
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2566,19 +2684,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- lr : return address
// -----------------------------------
Label miss;
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(r1); // receiver
__ mov(ip, Operand(callback)); // callback info
@@ -2596,7 +2707,80 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(r0);
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ __ Push(r1, r0);
+ ParameterCount actual(1);
+ __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r0);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r1, &miss);
+ CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
+
+ GenerateStoreViaSetter(masm(), setter);
+
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2641,7 +2825,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2687,7 +2871,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2722,7 +2906,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@@ -2742,7 +2926,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2757,13 +2941,76 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- lr : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+ GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ __ push(r0);
+ ParameterCount actual(0);
+ __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the maps haven't changed.
+ __ JumpIfSmi(r0, &miss);
+ CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
+
+ GenerateLoadViaGetter(masm(), getter);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2783,7 +3030,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2805,7 +3052,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2847,7 +3094,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, name);
+ return GetCode(Code::NORMAL, name);
}
@@ -2870,7 +3117,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(FIELD, name);
+ return GetCode(Code::FIELD, name);
}
@@ -2890,12 +3137,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(name));
__ b(ne, &miss);
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+ GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2920,7 +3167,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(CONSTANT_FUNCTION, name);
+ return GetCode(Code::CONSTANT_FUNCTION, name);
}
@@ -2946,7 +3193,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(INTERCEPTOR, name);
+ return GetCode(Code::INTERCEPTOR, name);
}
@@ -2967,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -2993,7 +3240,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3018,7 +3265,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
- return GetCode(CALLBACKS, name);
+ return GetCode(Code::CALLBACKS, name);
}
@@ -3038,7 +3285,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3066,7 +3313,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3091,7 +3338,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// r3 is used as scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
- GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ name,
+ r2, r1, r3, r4,
+ &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3099,7 +3352,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+ return GetCode(transition.is_null()
+ ? Code::FIELD
+ : Code::MAP_TRANSITION, name);
}
@@ -3123,7 +3378,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string());
+ return GetCode(Code::NORMAL, factory()->empty_string());
}
@@ -3162,7 +3417,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@@ -3212,7 +3467,13 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// r1: constructor function
// r2: initial map
// r7: undefined
+ ASSERT(function->has_initial_map());
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+#ifdef DEBUG
+ int instance_size = function->initial_map()->instance_size();
+ __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
+ __ Check(eq, "Instance size of initial map changed.");
+#endif
__ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3270,7 +3531,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3372,8 +3632,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3388,9 +3651,10 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
Label* fail) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
@@ -3404,13 +3668,12 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
- double_scratch0.low(),
- double_scratch0,
scratch0,
+ double_scratch0,
scratch1,
+ double_scratch1,
kCheckForInexactConversion);
__ b(ne, fail);
- __ vmov(scratch0, double_scratch0.low());
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
@@ -3438,7 +3701,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r3: elements array
@@ -3476,8 +3739,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
@@ -3485,8 +3748,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0);
} else {
@@ -3497,8 +3760,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3526,22 +3792,28 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
__ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ add(r0, r5, Operand(kHeapObjectTag));
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ vstr(d0, r5, HeapNumber::kValueOffset);
__ Ret();
} else {
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
Register dst1 = r1;
Register dst2 = r3;
FloatingPointHelper::Destination dest =
@@ -3562,8 +3834,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
@@ -3577,13 +3849,12 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3615,7 +3886,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -3626,25 +3897,24 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -3694,24 +3964,23 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
@@ -3769,7 +4038,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3820,7 +4089,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@@ -3830,7 +4099,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
__ vstr(d0, r3, 0);
} else {
__ str(r6, MemOperand(r3, 0));
@@ -3838,8 +4107,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3862,8 +4134,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
@@ -3902,8 +4174,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4042,8 +4317,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4095,7 +4373,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -4147,7 +4425,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
// Get the elements array.
__ ldr(elements_reg,
@@ -4169,7 +4447,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
+ heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
@@ -4223,9 +4501,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@@ -4253,7 +4531,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4263,7 +4541,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
- ASSERT(elements_kind == FAST_ELEMENTS);
+ ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4391,7 +4669,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4416,6 +4694,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
+ // All registers after this are overwritten.
elements_reg,
scratch1,
scratch2,