Diffstat (limited to 'src/3rdparty/v8/src/x64/code-stubs-x64.cc')
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc  335
1 file changed, 254 insertions(+), 81 deletions(-)
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
index 7069829..06ce52a 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
@@ -62,9 +62,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
+ Counters* counters = masm->isolate()->counters();
+
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
@@ -72,36 +76,113 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
// as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
+ __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
__ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
__ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
__ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ movq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &check_optimized, Label::kNear);
+ }
+ __ bind(&install_unoptimized);
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
+ rdi); // Initialize with undefined.
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+ // rcx holds the native context, rbx points to the fixed array of 3-element
+ // entries (native context, optimized code, literals).
+ // The optimized code map must never be empty, so check the first entry.
+ Label install_optimized;
+ // Speculatively move the code object into rdx.
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
+ __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+ __ j(equal, &install_optimized);
+
+ // Iterate through the rest of the map backwards. rdx holds an index.
+ Label loop;
+ Label restore;
+ __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ bind(&loop);
+ // Do not double-check the first entry.
+ __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+ __ j(equal, &restore);
+ __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
+ __ cmpq(rcx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &loop, Label::kNear);
+ // Hit: fetch the optimized code.
+ __ movq(rdx, FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+ // Now link the function into the context's list of optimized functions.
+ __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
+ // No need for a write barrier, as the JSFunction (rax) is in new space.
+
+ __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
+ // Copy the JSFunction (rax) into rdx before issuing the write barrier, as
+ // it clobbers all the registers passed to it.
+ __ movq(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
+ __ bind(&restore);
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&install_unoptimized);
+
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(rcx); // Temporarily remove return address.
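
For readers following the new lookup: the optimized-code-map search that this hunk implements in assembly can be modeled in plain C++ roughly as below. This is an illustrative sketch only; OptimizedCodeMapModel and its members are hypothetical names, not V8 API, and the entry layout is the one described in the comments above (native context, optimized code, literals).

    #include <cstddef>
    #include <vector>

    // Model of the per-SharedFunctionInfo optimized code map: a flat array of
    // 3-element entries, searched the way the stub does -- fast-path check of
    // the first entry, then a backwards scan that never re-checks entry 0.
    struct OptimizedCodeMapModel {
      static const std::size_t kEntryLength = 3;   // context, code, literals
      std::vector<const void*> slots;

      // Returns the cached optimized code for |native_context|, or nullptr,
      // in which case the caller installs the unoptimized code (the stub's
      // install_unoptimized path).
      const void* Lookup(const void* native_context) const {
        if (slots.empty()) return nullptr;
        if (slots[0] == native_context) return slots[1];
        for (std::size_t i = slots.size(); i > kEntryLength; ) {
          i -= kEntryLength;                       // step back to the previous entry
          if (slots[i] == native_context) return slots[i + 1];
        }
        return nullptr;
      }
    };

On a hit the stub also threads the new JSFunction onto the context's OPTIMIZED_FUNCTIONS_LIST, which is why a write barrier for that context slot is needed even though the JSFunction itself is in new space.
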
@@ -136,12 +217,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
// Copy the qmlglobal object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)), rbx);
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -182,9 +263,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
@@ -194,7 +275,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(equal, message);
}
__ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
__ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
@@ -204,12 +285,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+ __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
// Copy the qmlglobal object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::QML_GLOBAL_INDEX), rbx);
+ __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX));
+ __ movq(ContextOperand(rax, Context::QML_GLOBAL_OBJECT_INDEX), rbx);
// Initialize the rest of the slots to the hole value.
__ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
@@ -1007,8 +1088,8 @@ void BinaryOpStub::GenerateSmiCode(
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// Arguments to BinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
+ const Register left = rdx;
+ const Register right = rax;
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
@@ -1050,20 +1131,16 @@ void BinaryOpStub::GenerateSmiCode(
case Token::DIV:
// SmiDiv will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
- __ SmiDiv(rax, left, right, &use_fp_on_smis);
+ __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::MOD:
// SmiMod will not accept left in rdx or right in rax.
- left = rcx;
- right = rbx;
__ movq(rbx, rax);
__ movq(rcx, rdx);
- __ SmiMod(rax, left, right, &use_fp_on_smis);
+ __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
break;
case Token::BIT_OR: {
@@ -1228,11 +1305,9 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
&allocation_failed,
TAG_OBJECT);
// Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
@@ -1980,10 +2055,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
- if (FLAG_debug_code) {
- // Second should be non-smi if we get here.
- __ AbortIfSmi(second);
- }
+ __ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert second to smi, if possible.
@@ -2193,21 +2265,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg, while_true, no_multiply;
+ Label no_neg, while_true, while_false;
__ testl(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ negl(scratch);
__ bind(&no_neg);
- __ bind(&while_true);
+ __ j(zero, &while_false, Label::kNear);
__ shrl(scratch, Immediate(1));
- __ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ bind(&no_multiply);
+ // Above condition means CF==0 && ZF==0. This means that the
+ // bit that has been shifted out is 0 and the result is not 0.
+ __ j(above, &while_true, Label::kNear);
+ __ movsd(double_result, double_scratch);
+ __ j(zero, &while_false, Label::kNear);
+ __ bind(&while_true);
+ __ shrl(scratch, Immediate(1));
__ mulsd(double_scratch, double_scratch);
+ __ j(above, &while_true, Label::kNear);
+ __ mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
+ __ bind(&while_false);
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
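
The restructured loop is standard square-and-multiply on the absolute value of the integer exponent; the peeled first iteration merely avoids initializing the result to 1. In C++ the same computation looks roughly like this (a sketch of the algorithm, not of the stub's exact register usage):

    // Integer-exponent path of MathPowStub, modeled in C++: square-and-multiply
    // over the bits of |exponent|, then take the reciprocal for negative
    // exponents (the stub performs that final step after the loop).
    double PowInteger(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      double scratch = base;
      while (e != 0) {
        if (e & 1) result *= scratch;   // low bit set: multiply in the current square
        scratch *= scratch;             // square for the next bit
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }
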
@@ -2387,10 +2466,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rax = address of new object(s) (tagged)
// rcx = argument count (untagged)
- // Get the arguments boilerplate from the current (global) context into rdi.
+ // Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
__ testq(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -2533,7 +2612,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2603,9 +2682,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
- // Get the arguments boilerplate from the current (global) context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rdi, offset));
@@ -2722,7 +2801,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
__ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ j(above, &runtime);
// rax: RegExp data (FixedArray)
@@ -2872,30 +2951,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
+ static const int kRegExpExecuteArguments = 9;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
__ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 8: Pass current isolate address.
+ // Argument 9: Pass current isolate address.
// __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
// Immediate(ExternalReference::isolate_address()));
__ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
kScratchRegister);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Indicate that this is a direct call from JavaScript.
__ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
- // Argument 6: Start (high end) of backtracking stack memory area.
+ // Argument 7: Start (high end) of backtracking stack memory area.
__ movq(kScratchRegister, address_of_regexp_stack_memory_address);
__ movq(r9, Operand(kScratchRegister, 0));
__ movq(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ // Argument 6 is passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ Immediate(0));
+#else
+ __ Set(r9, 0);
#endif
// Argument 5: static offsets vector buffer.
@@ -2903,7 +2989,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
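
The bump from 8 to 9 arguments shifts every stack slot index by one, and which arguments need explicit stack stores depends on the calling convention. The slot count follows the usual x64 conventions, restated below for illustration (a sketch of the rule, not V8's helper itself):

    // Number of stack slots reserved for an outgoing C call with |num_arguments|
    // arguments. Win64 passes 4 arguments in registers but still reserves stack
    // "home" slots for them; the System V ABI passes 6 in registers and reserves
    // no slots for register arguments.
    int ArgumentStackSlots(int num_arguments, bool win64) {
      const int register_args = win64 ? 4 : 6;  // rcx,rdx,r8,r9 vs rdi,rsi,rdx,rcx,r8,r9
      if (win64) return num_arguments < register_args ? register_args : num_arguments;
      return num_arguments > register_args ? num_arguments - register_args : 0;
    }

With kRegExpExecuteArguments == 9 this gives 9 slots on Windows and 3 on Linux, which is why argument 9 (the isolate) lands at slot (slots - 1), while arguments 5 and 6 need explicit stack stores only on Windows and go in r8 and r9 on Linux.
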
@@ -2968,7 +3054,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
Label exception;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmpl(rax, Immediate(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
__ j(equal, &success, Label::kNear);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
__ j(equal, &exception);
@@ -3125,8 +3213,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r8: Number of array elements as smi.
// Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
@@ -3157,14 +3245,14 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// rax: JSArray.
// rbx: Number of elements in array that remains to be filled, as int32.
// rcx: Start of elements in FixedArray.
- // rdx: the hole.
+ // rdx: undefined.
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
@@ -3349,13 +3437,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ jmp(&not_user_equal);
__ bind(&user_equal);
-
+
__ pop(rbx); // Return address.
__ push(rax);
__ push(rdx);
__ push(rbx);
__ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
+
__ bind(&not_user_equal);
}
@@ -4691,7 +4779,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -4717,9 +4805,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
__ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
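
The renamed flags still implement the same bit test: after the two earlier checks fail, the stub still allows an ASCII cons string when the one-byte flag and the ASCII-data hint are split across the two operands, which it takes to mean the two-byte operand holds only ASCII characters. A sketch of the predicate (tag values here are placeholders, not V8's actual constants):

    // XOR the two instance-type words and mask with both flags; the test
    // succeeds only when the strings differ in both bits, i.e. exactly one
    // operand is one-byte and exactly one carries the ASCII-data hint.
    constexpr int kOneByteTagBit   = 1 << 2;   // placeholder value
    constexpr int kAsciiHintTagBit = 1 << 3;   // placeholder value

    bool CanBuildAsciiConsString(int instance_type1, int instance_type2) {
      const int mask = kOneByteTagBit | kAsciiHintTagBit;
      return ((instance_type1 ^ instance_type2) & mask) == mask;
    }
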
@@ -5239,7 +5327,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -5283,7 +5371,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
@@ -5887,8 +5975,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r0);
@@ -6044,18 +6131,20 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiOnlyToObject
- // and ElementsTransitionGenerator::GenerateSmiOnlyToObject
+ // ElementsTransitionGenerator::GenerateMapChangeElementTransition
+ // and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
{ REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+ // ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+ // FastNewClosureStub::Generate
+ { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -6100,6 +6189,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return true; // Always have SSE2 on x64.
+}
+
+
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -6232,6 +6326,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
+ __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ movq(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ subq(regs_.scratch1(), Immediate(1));
+ __ movq(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
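
The new counter check lets most recorded writes skip the incremental-marking test entirely. A model of the bookkeeping (not V8's MemoryChunk API; the page size below is a placeholder):

    #include <cstdint>

    // The object's page header is found by masking its address with
    // ~kPageAlignmentMask; each page carries a write-barrier counter that is
    // decremented on every recorded write, and the slow incremental-marking
    // path is taken only once the counter goes negative.
    constexpr std::uintptr_t kPageSizeModel = std::uintptr_t{1} << 20;  // placeholder
    constexpr std::uintptr_t kPageAlignmentMaskModel = kPageSizeModel - 1;

    std::uintptr_t PageStart(std::uintptr_t object_address) {
      return object_address & ~kPageAlignmentMaskModel;   // mirrors and_(scratch0, object)
    }

    struct PageCounterModel { std::intptr_t write_barrier_counter; };

    bool NeedsIncrementalMarker(PageCounterModel& page) {
      return --page.write_barrier_counter < 0;            // mirrors subq(..., 1) + j(negative, ...)
    }
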
@@ -6323,9 +6428,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(rdi, &double_elements);
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+ __ CheckFastSmiElements(rdi, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -6343,7 +6448,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// place.
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6357,8 +6462,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
+ // FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6379,6 +6484,74 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ masm->CallStub(&stub);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // Save volatile registers.
+ // Live registers at this point are the same as at the start of any
+ // JS function:
+ // o rdi: the JS function object being called (i.e. ourselves)
+ // o rsi: our context
+ // o rbp: our caller's frame pointer
+ // o rsp: stack pointer (pointing to return address)
+ // o rcx: rcx is zero for method calls and non-zero for function calls.
+#ifdef _WIN64
+ const int kNumSavedRegisters = 1;
+
+ __ push(rcx);
+#else
+ const int kNumSavedRegisters = 3;
+
+ __ push(rcx);
+ __ push(rdi);
+ __ push(rsi);
+#endif
+
+ // Calculate the original stack pointer and store it in the second arg.
+#ifdef _WIN64
+ __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#else
+ __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
+#endif
+
+ // Calculate the function address to the first arg.
+#ifdef _WIN64
+ __ movq(rcx, Operand(rdx, 0));
+ __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
+#else
+ __ movq(rdi, Operand(rsi, 0));
+ __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
+#endif
+
+ // Call the entry hook function.
+ __ movq(rax, &entry_hook_, RelocInfo::NONE);
+ __ movq(rax, Operand(rax, 0));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+
+ const int kArgumentCount = 2;
+ __ PrepareCallCFunction(kArgumentCount);
+ __ CallCFunction(rax, kArgumentCount);
+
+ // Restore volatile regs.
+#ifdef _WIN64
+ __ pop(rcx);
+#else
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rcx);
+#endif
+
+ __ Ret();
+}
+
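
The pointer arithmetic in the new stub recovers the two values handed to the entry hook. A model of that computation (descriptive names only, not V8 API):

    #include <cstdint>

    // After pushing kNumSavedRegisters volatile registers, the original stack
    // pointer is rsp plus the saved block; the word it points at is the return
    // address into the instrumented function, and subtracting the length of the
    // short call instruction that invoked this stub yields the function's entry.
    struct EntryHookArgsModel {
      std::uintptr_t function_entry;            // first argument passed to the hook
      std::uintptr_t return_address_location;   // second argument: where the return address lives
    };

    EntryHookArgsModel ComputeEntryHookArgs(const std::uintptr_t* rsp_after_pushes,
                                            int num_saved_registers,
                                            int call_instruction_length) {
      const std::uintptr_t* original_rsp = rsp_after_pushes + num_saved_registers;
      const std::uintptr_t return_address = *original_rsp;
      return { return_address - static_cast<std::uintptr_t>(call_instruction_length),
               reinterpret_cast<std::uintptr_t>(original_rsp) };
    }
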
#undef __
} } // namespace v8::internal