Diffstat (limited to 'chromium/v8/src/mips')
-rw-r--r--  chromium/v8/src/mips/OWNERS                          |    3
-rw-r--r--  chromium/v8/src/mips/assembler-mips-inl.h            |   75
-rw-r--r--  chromium/v8/src/mips/assembler-mips.cc               |  177
-rw-r--r--  chromium/v8/src/mips/assembler-mips.h                |  140
-rw-r--r--  chromium/v8/src/mips/builtins-mips.cc                |  409
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.cc              | 2721
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.h               |  162
-rw-r--r--  chromium/v8/src/mips/codegen-mips.cc                 |  705
-rw-r--r--  chromium/v8/src/mips/codegen-mips.h                  |   72
-rw-r--r--  chromium/v8/src/mips/constants-mips.cc               |   57
-rw-r--r--  chromium/v8/src/mips/constants-mips.h                |   65
-rw-r--r--  chromium/v8/src/mips/cpu-mips.cc                     |   45
-rw-r--r--  chromium/v8/src/mips/debug-mips.cc                   |  123
-rw-r--r--  chromium/v8/src/mips/deoptimizer-mips.cc             |   87
-rw-r--r--  chromium/v8/src/mips/disasm-mips.cc                  |   84
-rw-r--r--  chromium/v8/src/mips/frames-mips.cc                  |   54
-rw-r--r--  chromium/v8/src/mips/frames-mips.h                   |   36
-rw-r--r--  chromium/v8/src/mips/full-codegen-mips.cc            | 1058
-rw-r--r--  chromium/v8/src/mips/ic-mips.cc                      |  416
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.cc         | 1717
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.h          |  104
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.cc    |   37
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.h     |   31
-rw-r--r--  chromium/v8/src/mips/lithium-mips.cc                 | 1000
-rw-r--r--  chromium/v8/src/mips/lithium-mips.h                  |  686
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.cc         |  768
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.h          |  198
-rw-r--r--  chromium/v8/src/mips/regexp-macro-assembler-mips.cc  |   52
-rw-r--r--  chromium/v8/src/mips/regexp-macro-assembler-mips.h   |   37
-rw-r--r--  chromium/v8/src/mips/simulator-mips.cc               |  236
-rw-r--r--  chromium/v8/src/mips/simulator-mips.h                |   58
-rw-r--r--  chromium/v8/src/mips/stub-cache-mips.cc              | 1807
32 files changed, 5316 insertions(+), 7904 deletions(-)
diff --git a/chromium/v8/src/mips/OWNERS b/chromium/v8/src/mips/OWNERS
index 38473b56d1f..2dc1d77d367 100644
--- a/chromium/v8/src/mips/OWNERS
+++ b/chromium/v8/src/mips/OWNERS
@@ -1,2 +1,5 @@
plind44@gmail.com
gergely@homejinni.com
+palfia@homejinni.com
+kilvadyb@homejinni.com
+Dusan.Milosavljevic@rt-rk.com
diff --git a/chromium/v8/src/mips/assembler-mips-inl.h b/chromium/v8/src/mips/assembler-mips-inl.h
index 514b3aaa4f0..eec19a671a0 100644
--- a/chromium/v8/src/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/mips/assembler-mips-inl.h
@@ -37,15 +37,19 @@
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#include "mips/assembler-mips.h"
+#include "src/mips/assembler-mips.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/cpu.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+
+
// -----------------------------------------------------------------------------
// Operand and MemOperand.
@@ -108,7 +112,7 @@ int FPURegister::ToAllocationIndex(FPURegister reg) {
// -----------------------------------------------------------------------------
// RelocInfo.
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (IsCodeTarget(rmode_)) {
uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
@@ -128,7 +132,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -156,15 +160,24 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -179,22 +192,26 @@ Address Assembler::target_address_from_return_address(Address pc) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -205,7 +222,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -216,9 +233,11 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
@@ -235,11 +254,13 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -248,7 +269,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
-static const int kNoCodeAgeSequenceLength = 7;
+static const int kNoCodeAgeSequenceLength = 7 * Assembler::kInstrSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
@@ -260,13 +281,15 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + Assembler::kInstrSize));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ host_,
stub->instruction_start());
}
@@ -277,7 +300,7 @@ Address RelocInfo::call_address() {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -287,7 +310,7 @@ void RelocInfo::set_call_address(Address target) {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -318,7 +341,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -353,14 +376,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -380,14 +401,12 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
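
Note: every target_address_at / set_target_address_at call in this file gains a host_ argument. As the assembler-mips.h hunks later in this diff show, the extra Code* / ConstantPoolArray* parameter exists only for API parity with architectures that keep code targets in an out-of-line constant pool; MIPS has none, so the argument is plumbed through and dropped. A compilable sketch of that delegation, using simplified stand-in types:

    #include <cstdint>

    using Address = uint8_t*;
    struct ConstantPoolArray {};  // stand-in; never materialized on MIPS

    // The real pc-based reader (trivial stub here).
    static Address target_address_at(Address pc) { return pc; }

    // Shape of the overload added in assembler-mips.h: the pool is ignored.
    static Address target_address_at(Address pc, ConstantPoolArray* /*pool*/) {
      return target_address_at(pc);
    }

    int main() {
      uint8_t insn = 0;
      return target_address_at(&insn, nullptr) == &insn ? 0 : 1;
    }
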
diff --git a/chromium/v8/src/mips/assembler-mips.cc b/chromium/v8/src/mips/assembler-mips.cc
index 9aed3bd4aaa..e4bebfee4ba 100644
--- a/chromium/v8/src/mips/assembler-mips.cc
+++ b/chromium/v8/src/mips/assembler-mips.cc
@@ -33,48 +33,32 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips-inl.h"
-#include "serialize.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
-unsigned CpuFeatures::cross_compile_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
-static uint64_t CpuFeaturesImpliedByCompiler() {
- uint64_t answer = 0;
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
- answer |= static_cast<uint64_t>(1) << FPU;
+ answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
-#ifdef __mips__
// If the compiler is allowed to use FPU then we can use FPU too in our code
// generation even when generating snapshots. This won't work for cross
// compilation.
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
- answer |= static_cast<uint64_t>(1) << FPU;
-#endif // defined(__mips_hard_float) && __mips_hard_float != 0
-#endif // def __mips__
+#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
+ answer |= 1u << FPU;
+#endif
return answer;
}
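
Note: the probing code above replaces V8's old 64-bit feature mask with a plain unsigned bitmask, one bit per CpuFeature. A minimal standalone sketch of the pattern (the FPU bit and the membership test mirror this diff; the enum layout is otherwise assumed):

    #include <cassert>

    enum CpuFeature { FPU = 0 };  // bit index, as in "1u << FPU" above

    static unsigned supported_ = 0;

    static bool IsSupported(CpuFeature f) {
      return (supported_ & (1u << f)) != 0;  // same test as the removed CpuFeatures::Check()
    }

    int main() {
      supported_ |= 1u << FPU;  // what CpuFeaturesImpliedByCompiler() contributes
      assert(IsSupported(FPU));
      return 0;
    }
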
@@ -102,42 +86,29 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
}
-void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also allowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
-#if !defined(__mips__)
+#ifndef __mips__
// For the simulator build, use FPU.
- supported_ |= static_cast<uint64_t>(1) << FPU;
+ supported_ |= 1u << FPU;
#else
- // Probe for additional features not already known to be available.
+ // Probe for additional features at runtime.
CPU cpu;
- if (cpu.has_fpu()) {
- // This implementation also sets the FPU flags if
- // runtime detection of FPU returns true.
- supported_ |= static_cast<uint64_t>(1) << FPU;
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
- }
+ if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
int ToNumber(Register reg) {
ASSERT(reg.is_valid());
const int kNumbers[] = {
@@ -213,6 +184,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -260,6 +236,12 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
}
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend) : Operand(rm) {
+ offset_ = unit * multiplier + offset_addend;
+}
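
Note: the new four-argument constructor folds a scaled index into the immediate offset at construction time; MIPS has no scaled addressing mode, so the arithmetic happens once in C++. A standalone mimic of what it stores in offset_ (names follow the header change below; values are illustrative):

    #include <cassert>

    enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };

    static int ComputeOffset(int unit, int multiplier, OffsetAddend addend) {
      return unit * multiplier + addend;  // exactly what the constructor assigns to offset_
    }

    int main() {
      const int kPointerSize = 4;  // 32-bit MIPS
      assert(ComputeOffset(kPointerSize, 3, offset_minus_one) == 11);
      return 0;
    }
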
+
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -267,28 +249,30 @@ static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
+ | (kRegister_sp_Code << kRtShift)
+ | (kPointerSize & kImm16Mask); // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
+ | (kRegister_sp_Code << kRtShift)
+ | (-kPointerSize & kImm16Mask); // NOLINT
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask);
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
+ | (kNegOffset & kImm16Mask); // NOLINT
const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask);
+ | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -307,11 +291,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
trampoline_pool_blocked_nesting_ = 0;
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
- trampoline_emitted_ = false;
+ trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
@@ -1199,7 +1184,7 @@ void Assembler::jal_or_jalr(int32_t target, Register rs) {
}
-//-------Data-processing-instructions---------
+// -------Data-processing-instructions---------
// Arithmetic.
@@ -1342,7 +1327,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
}
-//------------Memory-instructions-------------
+// ------------Memory-instructions-------------
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
@@ -1459,7 +1444,7 @@ void Assembler::lui(Register rd, int32_t j) {
}
-//-------------Misc-instructions--------------
+// -------------Misc-instructions--------------
// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
@@ -1623,7 +1608,16 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
-//--------Coprocessor-instructions----------------
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+ ASSERT(kArchVariant != kLoongson);
+ ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+ Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+ | (rs.offset_);
+ emit(instr);
+}
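
Note: pref is encoded as a standard MIPS I-type instruction: 6-bit opcode, 5-bit base register in the rs field, the 5-bit hint in the rt slot, and a 16-bit offset. A standalone sketch of the bit packing (the shifts 21/16 are the usual kRsShift/kRtShift positions, and 0x33 is the PREF major opcode from the MIPS32 spec):

    #include <cstdint>
    #include <cassert>

    static uint32_t EncodePref(uint32_t base_reg, uint32_t hint, uint32_t offset) {
      const uint32_t kPref = 0x33u << 26;  // major opcode 0b110011
      return kPref | (base_reg << 21)      // kRsShift
                   | (hint << 16)          // kRtShift
                   | (offset & 0xffffu);
    }

    int main() {
      // pref(0 /* load hint */, MemOperand(sp /* register 29 */, 32))
      assert(EncodePref(29, 0, 32) == 0xCFA00020u);
      return 0;
    }
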
+
+
+// --------Coprocessor-instructions----------------
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
@@ -1634,10 +1628,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
@@ -1649,10 +1645,12 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
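
Note: both ldc1 and sdc1 now address the two 32-bit halves through Register::kMantissaOffset and Register::kExponentOffset (added to assembler-mips.h later in this diff) instead of hard-coded 0 and 4, so the split works on big-endian targets too. A standalone check of which word of an IEEE-754 double holds the exponent:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 1.0;  // bit pattern 0x3FF0000000000000
      uint32_t words[2];
      std::memcpy(words, &d, sizeof d);
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      // Little endian: mantissa word at byte offset 0, exponent word at 4 --
      // exactly the kMantissaOffset/kExponentOffset values in the header.
      assert(words[0] == 0x00000000u && words[1] == 0x3FF00000u);
    #endif
      return 0;
    }
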
@@ -1678,7 +1676,7 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- OS::MemCopy(&i, &d, 8);
+ memcpy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@@ -1993,9 +1991,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2053,15 +2051,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
@@ -2175,7 +2167,9 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -2269,7 +2263,9 @@ void Assembler::set_target_address_at(Address pc, Address target) {
patched_jump = true;
}
- CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ }
}
@@ -2306,6 +2302,21 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
}
}
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index d9ef46cd014..2ba3ef7166f 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -37,9 +37,10 @@
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
-#include "assembler.h"
-#include "constants-mips.h"
-#include "serialize.h"
+
+#include "src/assembler.h"
+#include "src/mips/constants-mips.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -76,6 +77,16 @@ struct Register {
static const int kSizeInBytes = 4;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
@@ -386,7 +397,15 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
+ // Immediate value attached to offset.
+ enum OffsetAddend {
+ offset_minus_one = -1,
+ offset_zero = 0
+ };
+
explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
+ OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
bool OffsetIsInt16Encodable() const {
@@ -400,64 +419,6 @@ class MemOperand : public Operand {
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- unsigned mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, unsigned set) {
- return (set & flag2set(f)) != 0;
- }
-
- static unsigned flag2set(CpuFeature f) {
- return 1u << f;
- }
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_only_;
-
- static unsigned cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -517,7 +478,34 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc, Address target);
+ static void set_target_address_at(Address pc,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
+ // On MIPS there is no Constant Pool so we skip that parameter.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool)) {
+ return target_address_at(pc);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -531,9 +519,10 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code,
target);
}
@@ -657,7 +646,7 @@ class Assembler : public AssemblerBase {
void jal_or_jalr(int32_t target, Register rs);
- //-------Data-processing-instructions---------
+ // -------Data-processing-instructions---------
// Arithmetic.
void addu(Register rd, Register rs, Register rt);
@@ -695,7 +684,7 @@ class Assembler : public AssemblerBase {
void rotrv(Register rd, Register rt, Register rs);
- //------------Memory-instructions-------------
+ // ------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
@@ -711,7 +700,12 @@ class Assembler : public AssemblerBase {
void swr(Register rd, const MemOperand& rs);
- //-------------Misc-instructions--------------
+ // ----------------Prefetch--------------------
+
+ void pref(int32_t hint, const MemOperand& rs);
+
+
+ // -------------Misc-instructions--------------
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
@@ -744,7 +738,7 @@ class Assembler : public AssemblerBase {
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
- //--------Coprocessor-instructions----------------
+ // --------Coprocessor-instructions----------------
// Load, store, and move.
void lwc1(FPURegister fd, const MemOperand& src);
@@ -850,10 +844,10 @@ class Assembler : public AssemblerBase {
assem_->EndBlockGrowBuffer();
}
- private:
- Assembler* assem_;
+ private:
+ Assembler* assem_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
};
// Debugging.
@@ -971,6 +965,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index 19f3cdf4ff8..800a79e1920 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -1,41 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -162,10 +140,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -297,16 +272,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push call kind information and function as parameter to the runtime call.
- __ Push(a1, t1, a1);
+ __ Push(a1, a1);
__ CallRuntime(function_id, 1);
// Restore call kind information and receiver.
- __ Pop(a1, t1);
+ __ Pop(a1);
}
@@ -318,7 +293,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -328,34 +309,27 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : allocation site or undefined
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@@ -370,25 +344,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ push(a2);
+ }
+
// Preserve the two incoming parameters on the stack.
__ sll(a0, a0, kSmiTagSize); // Tag arguments count.
__ MultiPushReversed(a0.bit() | a1.bit());
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
Label rt_call, allocated;
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ li(a2, Operand(debug_step_in_fp));
__ lw(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
-#endif
// Load the initial map and verify that it is in fact a map.
// a1: constructor function
@@ -405,22 +379,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lw(t0, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(t2, t0);
+ __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking));
// Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
+ __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift));
+ __ Branch(USE_DELAY_SLOT,
+ &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking));
+ __ sw(t0, bit_field3); // In delay slot.
__ Push(a1, a2, a1); // a1 = Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ Pop(a1, a2);
+ // Slack tracking counter is kNoSlackTracking after runtime call.
+ ASSERT(JSFunction::kNoSlackTracking == 0);
+ __ mov(t2, zero_reg);
__ bind(&allocate);
}
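
Note: the rewritten countdown keeps the slack-tracking counter in a bitfield of Map::bit_field3 instead of a byte on the SharedFunctionInfo. Since ConstructionCount sits at a fixed shift, the stub decrements it by subtracting 1 << kShift from the whole word; because the counter was checked to be non-zero just above, no borrow can leak into neighbouring fields. A standalone mimic (the 3-bit-at-bit-6 layout is illustrative, not V8's actual field layout):

    #include <cassert>
    #include <cstdint>

    const uint32_t kShift = 6;
    const uint32_t kMask = 0x7u << kShift;  // hypothetical 3-bit counter field

    static uint32_t Decode(uint32_t word) { return (word & kMask) >> kShift; }

    int main() {
      uint32_t bit_field3 = (5u << kShift) | 0x201u;  // counter = 5, other bits set
      bit_field3 -= 1u << kShift;                     // the in-place decrement
      assert(Decode(bit_field3) == 4);
      assert((bit_field3 & ~kMask) == 0x201u);        // neighbours untouched
      return 0;
    }
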
@@ -429,13 +407,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a2: initial map
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size
+ // a3: object size (not including memento if create_memento)
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -450,29 +432,63 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words)
+ // a3: object size (in words, including memento if create_memento)
// t4: JSObject (not tagged)
// t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
+ // t2: slack tracking counter (non-API function case)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+
+ // Use t7 to hold undefined, which is used in several places below.
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ Branch(&no_inobject_slack_tracking,
+ eq, t2, Operand(JSFunction::kNoSlackTracking));
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a0, t5, at);
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(t6, t4, Operand(at)); // End of object.
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
a0, Operand(t6));
}
__ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(a0)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // Fill in memento fields.
+ // t5: points to the allocated but uninitialized memento.
+ __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ // Load the AllocationSite.
+ __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ } else {
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
}
- __ InitializeFieldsWithFiller(t5, t6, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -488,12 +504,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields
// and in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
+ __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
__ Addu(a3, a3, Operand(t6));
- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
+ __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
__ subu(a3, a3, t6);
// Done if no extra properties are to be allocated.
@@ -541,11 +554,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addu(t6, a2, t3); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
+ if (!is_api_function || create_memento) {
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
}
__ jmp(&entry);
__ bind(&loop);
@@ -576,18 +589,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ UndoAllocationInNewSpace(t4, t5);
}
- __ bind(&rt_call);
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
__ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(t4, v0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// t4: JSObject
__ bind(&allocated);
- __ push(t4);
- __ push(t4);
+
+ if (create_memento) {
+ __ lw(a2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ __ Branch(&count_incremented, eq, a2, Operand(t5));
+ // a2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ lw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(t4, t4);
// Reload the number of arguments from the stack.
// sp[0]: receiver
@@ -630,17 +675,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -689,13 +731,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -762,15 +799,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(a0, a3);
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
}
// Leave internal frame.
@@ -790,22 +824,39 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a1);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(a1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
}
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -815,7 +866,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -825,7 +876,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -854,7 +905,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -864,7 +915,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a2);
+ __ PrepareCallCFunction(2, 0, a2);
__ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
@@ -876,7 +927,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
- __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
+ __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
__ Jump(a0);
}
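
Note: the offset rewrites in this hunk and the previous one follow from the assembler-mips-inl.h change earlier in this diff, where kNoCodeAgeSequenceLength became a byte length (7 * kInstrSize) rather than an instruction count (7). A quick consistency check of the new arithmetic:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;                             // MIPS instruction width
      const int kNoCodeAgeSequenceLength = 7 * kInstrSize;  // 28 bytes; was plain 7
      // Back up to the head of the sequence: both forms give 24 bytes.
      assert((7 - 1) * kInstrSize == kNoCodeAgeSequenceLength - kInstrSize);
      // Jump past the sequence: both forms give 28 bytes.
      assert(7 * kInstrSize == kNoCodeAgeSequenceLength);
      return 0;
    }
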
@@ -896,7 +947,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -922,7 +973,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -969,18 +1020,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Subu(a1, a1, a2);
- __ SmiTag(a1);
-
- // Pass both function and pc offset as arguments.
+ // Pass function as argument.
__ push(a0);
- __ push(a1);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the code object is null, just return to the unoptimized code.
@@ -1013,7 +1055,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1065,7 +1107,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
@@ -1089,9 +1131,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
+ __ Push(a0, a2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(a2, v0);
@@ -1106,14 +1146,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1175,17 +1209,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&function, eq, t0, Operand(zero_reg));
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Branch(&non_proxy, ne, t0, Operand(1));
__ push(a1); // Re-add proxy object as additional argument.
__ Addu(a0, a0, Operand(1));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1200,15 +1233,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ lw(a2,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize);
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(t1, CALL_AS_METHOD);
// Check formal and actual parameter counts.
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -1245,14 +1276,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ lw(a1, MemOperand(fp, kFunctionOffset));
__ Push(a1, v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
__ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
+ __ mov(a1, zero_reg);
+ __ Push(v0, a1); // Limit and initial index.
// Get the receiver.
__ lw(a0, MemOperand(fp, kRecvOffset));
@@ -1280,7 +1310,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&push_receiver, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(a0, &call_to_object);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ Branch(&use_global_receiver, eq, a0, Operand(a1));
@@ -1301,13 +1331,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
__ Branch(&push_receiver);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -1342,7 +1367,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ lw(a1, MemOperand(fp, kLimitOffset));
__ Branch(&loop, ne, a0, Operand(a1));
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(a0);
__ sra(a0, a0, kSmiTagSize);
@@ -1350,20 +1375,18 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ GetObjectType(a1, a2, a2);
__ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
@@ -1374,6 +1397,27 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual number of arguments
+ // -- a1 : function (passed through to callee)
+ // -- a2 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
+ // Make t1 the space we have left. The stack might already be overflowed
+ // here which will cause t1 to become negative.
+ __ subu(t1, sp, t1);
+ // Check if the arguments will overflow the stack.
+ __ sll(at, a2, kPointerSizeLog2);
+ // Signed comparison.
+ __ Branch(stack_overflow, le, t1, Operand(at));
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
[Editor's note] ArgumentAdaptorStackCheck above deliberately uses the real stack limit root rather than the interrupt limit, so debug-break and preemption requests do not trip the check; only a genuine shortage of stack space routes to the stack_overflow label bound later in the trampoline.
@@ -1406,13 +1450,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
- // -- a3: callee code entry
- // -- t1: call kind information
// -----------------------------------
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Label invoke, dont_adapt_arguments;
Label enough, too_few;
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
  // We use Uless as the number of arguments should always be greater than 0.
@@ -1517,6 +1562,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ Jump(a3);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ break_(0xCC);
+ }
}
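[Editor's note] The overflow test added in ArgumentAdaptorStackCheck is plain pointer arithmetic: it fails when the bytes needed for the expected arguments meet or exceed the space left above the real stack limit. A standalone sketch of the same comparison, all values hypothetical:

    #include <cstdint>
    #include <cstdio>

    const int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit MIPS

    // Mirrors: subu t1, sp, limit;  sll at, argc, log2;  branch if t1 <= at.
    bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit,
                       uint32_t expected_args) {
      // Space left; treated as signed, so an already-overflowed stack
      // yields a negative value and always takes the overflow branch.
      intptr_t space = static_cast<intptr_t>(sp - real_stack_limit);
      intptr_t needed = static_cast<intptr_t>(expected_args) << kPointerSizeLog2;
      return space <= needed;
    }

    int main() {
      // 256 bytes left, 100 args need 400 bytes: overflow (prints 1).
      std::printf("%d\n", WouldOverflow(0x8000, 0x7F00, 100));
    }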
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 4c3708ce7a5..0287a9a616f 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -1,57 +1,41 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -61,50 +45,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a3, a2, a1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a3, a2, a1, a0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a2, a3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
@@ -115,7 +100,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
@@ -125,8 +109,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -136,7 +139,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1 };
descriptor->register_param_count_ = 1;
@@ -145,21 +147,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a0, a2 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a2, a1, a0 };
descriptor->register_param_count_ = 3;
@@ -170,7 +176,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0, a1 };
descriptor->register_param_count_ = 2;
@@ -182,7 +187,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -190,18 +194,17 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// a0 -- number of arguments
// a1 -- function
- // a2 -- type info cell with elements kind
+ // a2 -- allocation site with elements kind
static Register registers_variable_args[] = { a1, a2, a0 };
static Register registers_no_args[] = { a1, a2 };
@@ -214,17 +217,21 @@ static void InitializeArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = a0;
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -242,38 +249,38 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = a0;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0 };
descriptor->register_param_count_ = 1;
@@ -281,33 +288,29 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a2, a0 };
descriptor->register_param_count_ = 3;
@@ -318,7 +321,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a0, a3, a1, a2 };
descriptor->register_param_count_ = 4;
@@ -328,14 +330,118 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2, a1, a0 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { a1, // JSFunction
+ cp, // context
+ a0, // actual number of arguments
+ a2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ a2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ a2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ a0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { a0, // callee
+ t0, // call_data
+ a2, // holder
+ a1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
}
@@ -358,19 +464,21 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
a0.is(descriptor->register_params_[param_count - 1]));
- // Push arguments
+ // Push arguments, adjust sp.
+ __ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ // Store argument to stack.
+ __ sw(descriptor->register_params_[i],
+ MemOperand(sp, (param_count-1-i) * kPointerSize));
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
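[Editor's note] The rewritten argument push above trades N push instructions (each adjusting sp) for one sp adjustment plus N stores; parameter i is stored at (param_count - 1 - i) * kPointerSize so the first register lands deepest, preserving the old push order. A small sketch of the layout math, with made-up values:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int param_count = 3;
      // After: Subu(sp, sp, param_count * kPointerSize), each register
      // is stored at the offset its push would have produced.
      for (int i = 0; i < param_count; ++i) {
        std::printf("param %d -> sp + %d\n",
                    i, (param_count - 1 - i) * kPointerSize);
      }
      // param 0 -> sp + 8, param 1 -> sp + 4, param 2 -> sp + 0
    }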
@@ -380,107 +488,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ li(a1, Operand(Smi::FromInt(0)));
- __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(1);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
- __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ DropAndRet(2);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -488,11 +495,13 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
public:
- ConvertToDoubleStub(Register result_reg_1,
+ ConvertToDoubleStub(Isolate* isolate,
+ Register result_reg_1,
Register result_reg_2,
Register source_reg,
Register scratch_reg)
- : result1_(result_reg_1),
+ : PlatformCodeStub(isolate),
+ result1_(result_reg_1),
result2_(result_reg_2),
source_(source_reg),
zeros_(scratch_reg) { }
@@ -521,13 +530,14 @@ class ConvertToDoubleStub : public PlatformCodeStub {
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
+ Register exponent, mantissa;
+ if (kArchEndian == kLittle) {
+ exponent = result1_;
+ mantissa = result2_;
+ } else {
+ exponent = result2_;
+ mantissa = result1_;
+ }
Label not_special;
// Convert from Smi to integer.
__ sra(source_, source_, kSmiTagSize);
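[Editor's note] ConvertToDoubleStub's result registers depend on which 32-bit word of the IEEE-754 double carries the sign and exponent, which is what the new runtime kArchEndian selection above decides. A standalone illustration of the word split the stub relies on, detecting host endianness at runtime as a stand-in for that check:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double d = -42.5;
      uint32_t words[2];
      std::memcpy(words, &d, sizeof(d));
      // On little-endian targets the second word holds the sign (1 bit)
      // and biased exponent (11 bits); on big-endian it is the first.
      uint32_t one = 1;
      bool little = *reinterpret_cast<uint8_t*>(&one) == 1;
      uint32_t exponent_word = little ? words[1] : words[0];
      std::printf("sign=%u biased_exponent=%u\n",
                  exponent_word >> 31, (exponent_word >> 20) & 0x7FF);
      // -42.5 = -1.328125 * 2^5, so sign=1 and biased exponent = 1023 + 5.
    }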
@@ -610,7 +620,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Try a conversion to a signed integer.
__ Trunc_w_d(double_scratch, double_scratch);
// Move the converted value into the result register.
- __ mfc1(result_reg, double_scratch);
+ __ mfc1(scratch3, double_scratch);
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
@@ -621,16 +631,22 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
| kFCSRInvalidOpFlagMask);
- // If we had no exceptions we are done.
- __ Branch(&done, eq, scratch, Operand(zero_reg));
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
}
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low, MemOperand(input_reg, double_offset));
- __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+ __ lw(input_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
@@ -716,10 +732,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate);
- stub2.GetCode(isolate);
+ WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
+ stub1.GetCode();
+ stub2.GetCode();
}
@@ -1109,8 +1125,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// f12, f14 are the double representations of the left hand side
// and the right hand side if we have FPU. Otherwise a2, a3 represent
// left hand side and a0, a1 represent right hand side.
-
- Isolate* isolate = masm->isolate();
Label nan;
__ li(t0, Operand(LESS));
__ li(t1, Operand(GREATER));
@@ -1185,7 +1199,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
lhs,
@@ -1234,6 +1249,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ mov(t9, ra);
+ __ pop(ra);
+ __ StoreToSafepointRegisterSlot(t9, t9);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Jump(t9);
+}
+
+
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1248,9 +1288,9 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
__ MultiPopFPU(kCallerSavedFPU);
@@ -1261,253 +1301,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in f4, double result goes
- // into f4.
- // Tagged case: tagged input on top of stack and in a0,
- // tagged result (heap number) goes into v0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = t5;
- const Register scratch1 = t3;
- const Register cache_entry = a0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
-
- __ bind(&calculate);
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
- masm->isolate()),
- 1,
- 1);
- } else {
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // a0: precalculated cache entry address.
- // a2 and a3: parts of the double value.
- // Store a0, a2 and a3 on stack for later before calling C function.
- __ Push(a3, a2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(a3, a2, cache_entry);
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
- __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
-
- __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
- __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
- __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, cache_entry);
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
- __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(f4);
- __ bind(&no_update);
-
- // We return the value in f4 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- __ push(ra);
- __ PrepareCallCFunction(2, scratch);
- if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
- } else {
- __ mov_d(f12, f4);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- Isolate* isolate = masm->isolate();
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(
- ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(
- ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(
- ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(ra);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
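[Editor's note] The transcendental cache deleted above indexed entries by mixing the double's two 32-bit words exactly as its removed comment states: h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= cacheSize - 1. A standalone version of that hash, with the cache size assumed to be 512 for illustration:

    #include <cstdint>
    #include <cstdio>

    uint32_t CacheHash(uint32_t low, uint32_t high, uint32_t cache_size) {
      // Signed type so >> is an arithmetic shift on mainstream compilers,
      // matching the sra instructions in the deleted assembly.
      int32_t h = static_cast<int32_t>(low ^ high);
      h ^= h >> 16;
      h ^= h >> 8;
      // cache_size must be a power of two, as the ASSERT required.
      return static_cast<uint32_t>(h) & (cache_size - 1);
    }

    int main() {
      std::printf("%u\n", CacheHash(0xDEADBEEF, 0x3FF00000, 512));
    }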
@@ -1624,13 +1417,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch2);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
@@ -1687,11 +1480,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1708,13 +1501,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
+ ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ __ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
@@ -1736,74 +1529,83 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(
+ Isolate* isolate) {
+ RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
+ CEntryStub save_doubles(isolate, 1, mode);
+ StoreBufferOverflowStub stub(isolate, mode);
// These stubs might already be in the snapshot, detect that and don't
// regenerate, which would lead to code stub initialization state being messed
// up.
Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *save_doubles.GetCode(isolate);
+ if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+ save_doubles_code = *save_doubles.GetCode();
}
Code* store_buffer_overflow_code;
- if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
- store_buffer_overflow_code = *stub.GetCode(isolate);
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
+ store_buffer_overflow_code = *stub.GetCode();
}
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ andi(scratch, value, 0xf);
- __ Branch(oom_label, eq, scratch, Operand(0xf));
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // v0: result parameter for PerformGC, if any
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+ // The reason for this is that these arguments would need to be saved anyway
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
- Isolate* isolate = masm->isolate();
+ // Compute the argv pointer in a callee-saved register.
+ __ Addu(s1, sp, s1);
- if (do_gc) {
- // Move result passed in v0 into a0 to call PerformGC.
- __ mov(a0, v0);
- __ PrepareCallCFunction(2, 0, a1);
- __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
- }
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_);
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ li(a0, Operand(scope_depth));
- __ lw(a1, MemOperand(a0));
- __ Addu(a1, a1, Operand(1));
- __ sw(a1, MemOperand(a0));
- }
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
// Prepare arguments for C routine.
// a0 = argc
@@ -1815,7 +1617,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1850,154 +1652,67 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->InstructionsGeneratedSince(&find_ra));
}
- if (always_allocate) {
- // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
- __ li(a2, Operand(scope_depth));
- __ lw(a3, MemOperand(a2));
- __ Subu(a3, a3, Operand(1));
- __ sw(a3, MemOperand(a2));
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&okay, ne, v0, Operand(t0));
+ __ stop("The hole escaped");
+ __ bind(&okay);
}
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
- // Restore stack (remove arg slots) in branch delay slot.
- __ addiu(sp, sp, kCArgsSlotsSize);
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(t0, Heap::kExceptionRootIndex);
+ __ Branch(&exception_returned, eq, t0, Operand(v0));
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(a2, MemOperand(a2));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ // Cannot use a check here as it attempts to generate a call into the runtime.
+ __ Branch(&okay, eq, t0, Operand(a2));
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
+ // s0: still holds argc (callee-saved).
__ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(&retry, eq, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ lw(v0, MemOperand(t0));
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+ __ li(a2, Operand(pending_exception_address));
+ __ lw(v0, MemOperand(a2));
// Clear the pending exception.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(a3, MemOperand(t0));
+ __ li(a3, Operand(isolate()->factory()->the_hole_value()));
+ __ sw(a3, MemOperand(a2));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
+ Label throw_termination_exception;
__ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
- __ Branch(throw_termination_exception, eq, v0, Operand(t0));
+ __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry);
- // Last failure (v0) will be moved to (a0) for parameter when retrying.
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
- // Compute the argv pointer in a callee-saved register.
- __ Addu(s1, sp, s1);
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ li(a0, Operand(false, RelocInfo::NONE32));
- __ li(a2, Operand(external_caught));
- __ sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, v0, t0, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(a2));
- // Fall through to the next label.
+ __ Throw(v0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
-
- __ bind(&throw_normal_exception);
- __ Throw(v0);
}
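[Editor's note] After this rewrite the runtime-call protocol is one sentinel comparison instead of the old failure-tag decoding and GC-retry loop: the callee returns either a real value or the dedicated exception root, and the actual exception is fetched from, then cleared in, the isolate's pending-exception slot. A rough standalone sketch of that control flow; the names mirror the generated code but none of this is the runtime's real API:

    #include <cstdio>

    struct Object {};
    Object kException, kTheHole, kTermination;  // stand-ins for heap roots
    Object* pending_exception = &kTheHole;      // hypothetical isolate slot

    Object* FailingBuiltin() {                  // a runtime call that throws
      pending_exception = &kTermination;
      return &kException;                       // the sentinel, never a value
    }

    int main() {
      Object* result = FailingBuiltin();
      if (result == &kException) {              // Branch(&exception_returned, eq, ...)
        Object* ex = pending_exception;         // retrieve pending exception
        pending_exception = &kTheHole;          // clear it
        std::puts(ex == &kTermination ? "ThrowUncatchable" : "Throw");
      }
    }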
@@ -2089,7 +1804,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -2314,7 +2029,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ Branch(&object_not_null,
ne,
scratch,
- Operand(masm->isolate()->factory()->null_value()));
+ Operand(isolate()->factory()->null_value()));
__ li(v0, Operand(Smi::FromInt(1)));
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -2362,7 +2077,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- a1 : receiver
// -----------------------------------
__ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->prototype_string()));
+ Operand(isolate()->factory()->prototype_string()));
receiver = a1;
} else {
ASSERT(kind() == Code::LOAD_IC);
@@ -2382,108 +2097,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return a0; }
@@ -2543,7 +2156,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2565,11 +2178,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2633,7 +2246,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
@@ -2642,7 +2255,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2683,7 +2296,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
@@ -2701,7 +2314,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
@@ -2784,7 +2397,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
@@ -2824,7 +2437,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
@@ -2834,7 +2447,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
@@ -2853,7 +2466,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -2882,7 +2495,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
@@ -2891,7 +2504,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // time or if the regexp entry in generated code is turned off by a runtime
  // switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2905,8 +2518,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Isolate* isolate = masm->isolate();
-
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
@@ -2921,9 +2532,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- isolate);
+ isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -3031,8 +2642,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
// (5) Sequential string. Load regexp code according to encoding.
@@ -3070,7 +2681,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(),
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -3094,7 +2705,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
@@ -3116,7 +2727,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate)));
+ ExternalReference::address_of_static_offsets_vector(isolate())));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
@@ -3149,7 +2760,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, t9);
__ LeaveExitFrame(false, no_reg, true);
@@ -3171,9 +2782,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
  // we haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(isolate->factory()->the_hole_value()));
+ __ li(a1, Operand(isolate()->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ isolate())));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
@@ -3191,7 +2802,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(isolate->factory()->null_value()));
+ __ li(v0, Operand(isolate()->factory()->null_value()));
__ DropAndRet(4);
// Process the result from the native regexp code.
@@ -3252,7 +2863,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(isolate());
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
@@ -3283,7 +2894,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3329,287 +2940,259 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(a1, &slowcase);
- __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in a2.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
- __ Addu(a2, t1, Operand(objects_size));
- __ Allocate(
- a2, // In: Size, in words.
- v0, // Out: Start of allocation (tagged).
- a3, // Scratch register.
- t0, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // v0: Start of allocated area, object-tagged.
- // a1: Number of elements in array, as smi.
- // t1: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
- __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ lw(a1, MemOperand(sp, kPointerSize * 0));
- __ lw(a2, MemOperand(sp, kPointerSize * 1));
- __ lw(t2, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // v0: JSArray, tagged.
- // a3: FixedArray, tagged.
- // t1: Number of elements in array, untagged.
-
- // Set map.
- __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ sll(t2, t1, kSmiTagSize);
- __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with undefined.
- // v0: JSArray, tagged.
- // a2: undefined.
- // a3: Start of elements in FixedArray.
- // t1: Number of elements to fill.
- Label loop;
- __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
- __ addu(t1, t1, a3); // Point past last element to store.
- __ bind(&loop);
- __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
- __ sw(a2, MemOperand(a3));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ addiu(a3, a3, kPointerSize); // In branch delay slot.
-
- __ bind(&done);
- __ DropAndRet(3);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t0, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ lw(t1, FieldMemOperand(t0, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ jmp(&done);
+ }
__ bind(&miss);
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, t0, Operand(at));
  // MegamorphicSentinel is an immortal immovable object (a symbol) so no
  // write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&not_array_function, ne, a1, Operand(t0));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs =
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(a0);
- __ MultiPush(kSavedRegs);
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
- __ MultiPop(kSavedRegs);
- __ SmiUntag(a0);
+ __ bind(&not_array_function);
}
- __ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
__ bind(&done);
}
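
// NOTE (editorial sketch, not part of this patch): the recurring
// sll/Addu/FieldMemOperand sequence above turns a smi-tagged slot index into
// the address of a feedback vector entry. Assuming the usual 32-bit V8
// layout (4-byte pointers, 1-bit smi tag, tagged heap pointers, two header
// words per FixedArray), the computation is roughly:

#include <cstdint>

namespace feedback_sketch {

const int kPointerSizeLog2 = 2;       // 4-byte pointers on MIPS32
const int kSmiTagSize = 1;            // low bit distinguishes smis
const int kHeapObjectTag = 1;         // heap pointers carry a tag in bit 0
const int kFixedArrayHeaderSize = 8;  // map word + length word

// Address of slot |tagged_slot| (a smi) within |vector| (a tagged pointer).
inline uintptr_t FeedbackSlotAddress(uintptr_t vector, int32_t tagged_slot) {
  // sll t0, a3, kPointerSizeLog2 - kSmiTagSize: smi index -> byte offset.
  uintptr_t offset = static_cast<uintptr_t>(tagged_slot)
                     << (kPointerSizeLog2 - kSmiTagSize);
  // Addu plus FieldMemOperand: skip the header and strip the tag bit.
  return vector + offset + kFixedArrayHeaderSize - kHeapObjectTag;
}

}  // namespace feedback_sketch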
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // a1 : the function to call
- // a2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call, ne, t0, Operand(at));
- // Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
-
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
- // Get the map of the function object.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
+ // Do not transform the receiver for strict mode functions.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ // Do not transform the receiver for natives (compiler hints already in t0).
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(cont, ne, at, Operand(zero_reg));
+}
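
// NOTE (editorial sketch, not part of this patch): the And/Branch pair above
// tests two smi-shifted bits of the compiler-hints word in a single compare.
// The bit positions below are assumptions standing in for the real
// SharedFunctionInfo constants:

namespace hints_sketch {

const int kSmiTagSize = 1;             // the hints word is smi-tagged
const int kStrictModeFunctionBit = 7;  // placeholder for kStrictModeFunction
const int kNativeBit = 8;              // placeholder for kNative

// True when the receiver must be left untouched (strict or native function).
inline bool SkipsReceiverWrap(int tagged_compiler_hints) {
  int mask = (1 << (kStrictModeFunctionBit + kSmiTagSize)) |
             (1 << (kNativeBit + kSmiTagSize));
  // Branch(cont, ne, at, zero_reg) is taken when either bit is set.
  return (tagged_compiler_hints & mask) != 0;
}

}  // namespace hints_sketch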
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
- }
+static void EmitSlowCase(MacroAssembler* masm,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // Put proxy as an additional argument.
+ __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
+ __ mov(a2, zero_reg);
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Set up the number of arguments.
+ __ bind(non_function);
+ __ sw(a1, MemOperand(sp, argc * kPointerSize));
+ __ li(a0, Operand(argc)); // Set up the number of arguments.
__ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(t1, CALL_AS_METHOD);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(a1);
+ }
+ __ Branch(USE_DELAY_SLOT, cont);
+ __ sw(v0, MemOperand(sp, argc * kPointerSize));
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // a1 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ }
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ if (needs_checks) {
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into a2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by a3 + 1.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, t1);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3618,21 +3201,157 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
  // Set expected number of arguments to zero (not changing a0).
__ li(a2, Operand(0, RelocInfo::NONE32));
- __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ RelocInfo::CODE_TARGET);
+}
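
// NOTE (editorial sketch, not part of this patch): with
// FLAG_pretenuring_call_new the feedback vector reserves two consecutive
// entries per call site, which is why kPointerSize is added to the slot
// offset above. Illustratively (names are hypothetical):

namespace construct_sketch {

struct CallSiteFeedback {
  void* target;           // slot n: cached function or sentinel symbol
  void* allocation_site;  // slot n + 1: AllocationSite used for pretenuring
};

// Fetching the AllocationSite is one word past the call-target slot.
inline void* AllocationSiteOf(const CallSiteFeedback& feedback) {
  return feedback.allocation_site;
}

}  // namespace construct_sketch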
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ lw(vector, FieldMemOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
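
// NOTE (editorial sketch, not part of this patch): the helper above is a
// three-load pointer chase from the frame to the feedback vector. In plain
// C++ (types and field names are illustrative only):

namespace vector_sketch {

struct FeedbackVector;
struct SharedFunctionInfo { FeedbackVector* feedback_vector; };
struct JSFunction { SharedFunctionInfo* shared; };
struct Frame { JSFunction* function; };

inline FeedbackVector* LoadTypeFeedbackVector(const Frame* fp) {
  return fp->function->shared->feedback_vector;  // frame -> fn -> SFI -> vector
}

}  // namespace vector_sketch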
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id
+ Label miss;
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ Branch(&miss, ne, a1, Operand(at));
+
+ __ li(a0, Operand(arg_count()));
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(at, a2, Operand(at));
+ __ lw(a2, FieldMemOperand(at, FixedArray::kHeaderSize));
+ // Verify that a2 contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, at);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+ // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // a1 - function
+ // a3 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, a2);
+
+ // The checks. First, does a1 match the recorded monomorphic target?
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ __ JumpIfSmi(a3, &wrap);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ Branch(&slow_start, eq, t0, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ Branch(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+ // The slow case.
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver, the function, and the feedback info.
+ __ Push(t0, a1, a2, a3);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to a1 and exit the internal frame.
+ __ mov(a1, v0);
+ }
}
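
// NOTE (editorial sketch, not part of this patch): CallICStub::Generate above
// implements a per-slot cache with three states. A hypothetical C++ rendering
// of the transitions it performs:

namespace call_ic_sketch {

enum class State { kUninitialized, kMonomorphic, kMegamorphic };

inline State NextState(State state, bool same_target) {
  switch (state) {
    case State::kUninitialized:
      return State::kMonomorphic;  // the miss handler records the target
    case State::kMonomorphic:
      // A hit keeps the state; a different target goes megamorphic.
      return same_target ? State::kMonomorphic : State::kMegamorphic;
    case State::kMegamorphic:
      return State::kMegamorphic;  // terminal: nothing more is recorded
  }
  return state;  // unreachable
}

}  // namespace call_ic_sketch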
@@ -3700,7 +3419,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
@@ -3724,7 +3443,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, v0);
@@ -3783,290 +3502,53 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for
- // very short strings.
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
- __ addu(count, dest, count); // Count now points to the last dest byte.
-
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&loop, lt, dest, Operand(count));
-
- __ bind(&done);
-}
-
-
enum CopyCharactersFlags {
COPY_ASCII = 1,
DEST_ALWAYS_ALIGNED = 2
};
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ And(scratch, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
kDestinationOfCopyNotAligned,
- scratch4,
+ scratch,
Operand(zero_reg));
}
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
- if (!ascii) {
- __ addu(count, count, count);
- }
- __ Branch(&done, eq, count, Operand(zero_reg));
-
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ Subu(scratch1, count, Operand(8));
- __ Addu(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ And(scratch4, dest, Operand(kReadAlignmentMask));
- Label dest_aligned;
- __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
- Label aligned_loop;
- __ bind(&aligned_loop);
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ addiu(scratch4, scratch4, 1);
- __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ And(scratch4, src, Operand(kReadAlignmentMask));
- __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
-
- // Loop for src/dst that are not aligned the same way.
- // This loop uses lwl and lwr instructions. These instructions
- // depend on the endianness, and the implementation assumes little-endian.
- {
- Label loop;
- __ bind(&loop);
- __ lwr(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ lwl(scratch1, MemOperand(src, -1));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ __ Addu(count, count, count);
}
- __ Branch(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dest, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ lw(scratch1, MemOperand(src));
- __ Addu(src, src, Operand(kReadAlignment));
- __ sw(scratch1, MemOperand(dest));
- __ Addu(dest, dest, Operand(kReadAlignment));
- __ Subu(scratch2, limit, dest);
- __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
- }
+ Register limit = count; // Read until dest equals this.
+ __ Addu(limit, dest, Operand(count));
+ Label loop_entry, loop;
// Copy bytes from src to dest until dest hits limit.
- __ bind(&byte_loop);
- // Test if dest has already reached the limit.
- __ Branch(&done, ge, dest, Operand(limit));
- __ lbu(scratch1, MemOperand(src));
- __ addiu(src, src, 1);
- __ sb(scratch1, MemOperand(dest));
- __ addiu(dest, dest, 1);
- __ Branch(&byte_loop);
+ __ Branch(&loop_entry);
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ Addu(src, src, Operand(1));
+ __ sb(scratch, MemOperand(dest));
+ __ Addu(dest, dest, Operand(1));
+ __ bind(&loop_entry);
+ __ Branch(&loop, lt, dest, Operand(limit));
__ bind(&done);
}
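
// NOTE (editorial sketch, not part of this patch): the rewritten helper above
// is a straight byte copy; two-byte strings just double the byte count first.
// An equivalent C++ loop:

#include <cstddef>
#include <cstdint>

namespace copy_sketch {

inline void CopyCharacters(uint8_t* dest, const uint8_t* src, size_t count,
                           bool two_byte) {
  if (two_byte) count *= 2;       // Addu(count, count, count)
  uint8_t* limit = dest + count;  // "Read until dest equals this."
  while (dest < limit) *dest++ = *src++;
}

}  // namespace copy_sketch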
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ Subu(scratch, c1, Operand(static_cast<int>('0')));
- __ Branch(&not_array_index,
- Ugreater,
- scratch,
- Operand(static_cast<int>('9' - '0')));
- __ Subu(scratch, c2, Operand(static_cast<int>('0')));
-
- // If check failed combine both characters into single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in c1 register.
- Label tmp;
- __ sll(scratch1, c2, kBitsPerByte);
- __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
- __ Or(c1, c1, scratch1);
- __ bind(&tmp);
- __ Branch(
- not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ sll(scratch, c2, kBitsPerByte);
- __ Or(chars, chars, scratch);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load string table.
- // Load address of first element of the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ sra(mask, mask, 1);
- __ Addu(mask, mask, -1);
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ Addu(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers.
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ And(candidate, candidate, Operand(mask));
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_string_table_element);
- __ lw(candidate, MemOperand(scratch));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ GetObjectType(candidate, scratch, scratch);
- __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
-
- __ Branch(not_found, eq, undefined, Operand(candidate));
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
- scratch, Operand(candidate));
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If length is not 2 the string is not a candidate.
- __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ mov(v0, result);
-}
-
-
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
@@ -4263,7 +3745,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(t0, a1, Operand(kShortExternalStringTag));
__ Branch(&runtime, ne, t0, Operand(zero_reg));
__ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
@@ -4295,8 +3777,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a2: result string length
// t1: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
__ jmp(&return_v0);
// Allocate and copy the resulting two-byte string.
@@ -4315,17 +3797,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a2: result length.
// t1: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ StringHelper::GenerateCopyCharacters(
+ masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
__ bind(&return_v0);
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
__ DropAndRet(3);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -4463,7 +3945,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
// Stack frame on entry.
// sp[0]: right string
@@ -4490,356 +3972,35 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfEitherSmi(a0, a1, &call_runtime);
- // Load instance types.
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // a0: first string
- // a1: second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- // These tests use a zero-length check on the string-length, which is a Smi.
- // Assert that Smi::FromInt(0) is really 0.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(Smi::FromInt(0) == 0);
- __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
- __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
- __ mov(v0, a0); // Assume we'll return first string (from a0).
- __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
- __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
- __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
- __ and_(t4, t4, t5); // Branch if both strings were non-empty.
- __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&strings_not_empty);
- }
-
- // Untag both string-lengths.
- __ sra(a2, a2, kSmiTagSize);
- __ sra(a3, a3, kSmiTagSize);
-
- // Both strings are non-empty.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ Addu(t2, a2, Operand(a3));
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return a string here.
- __ Branch(&longer_than_two, ne, t2, Operand(2));
-
- // Check that both strings are non-external ASCII strings.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&make_two_character_string);
- // Resulting string has length 2 and first chars of two strings
- // are combined into single halfword in a2 register.
- // So we can fill resulting string without two loops by a single
- // halfword store instruction (which assumes that processor is
- // in a little endian mode).
- __ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
- __ And(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringEncodingMask));
- __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ li(t0, Operand(high_promotion_mode));
- __ lw(t0, MemOperand(t0, 0));
- __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
-
- __ mov(t3, v0);
- __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
- __ RecordWriteField(t3,
- ConsString::kFirstOffset,
- a0,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
- __ RecordWriteField(t3,
- ConsString::kSecondOffset,
- a1,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
-
- __ bind(&after_writing);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // t0: first instance type.
- // t1: second instance type.
- // Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
- __ And(at, t0, Operand(kOneByteDataHintMask));
- __ and_(at, at, t1);
- __ Branch(&ascii_data, ne, at, Operand(zero_reg));
- __ Xor(t0, t0, Operand(t1));
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
- __ Branch(&ascii_data, eq, t0,
- Operand(kOneByteStringTag | kOneByteDataHintTag));
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
- __ Branch(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // a0: first string
- // a1: second string
- // a2: length of first string
- // a3: length of second string
- // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // t2: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
- }
- // Check whether both strings have same encoding
- __ Xor(t3, t0, Operand(t1));
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t0, Operand(kStringRepresentationMask));
-
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_first_add;
- __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_first_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t0, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- Label skip_second_add;
- __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
- __ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ bind(&skip_second_add);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ And(t4, t1, Operand(kShortExternalStringMask));
- __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
- __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // t3: first character of first string
- // a1: first character of second string
- // a2: length of first string
- // a3: length of second string
- // t2: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ And(t4, t1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
-
- __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
-
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // v0: result string.
- // t3: first character of first string.
- // a1: first character of second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: first character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
- // t2: next character of result.
- StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
-
- __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ DropAndRet(2);
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+ // Load a2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ li(a2, handle(isolate()->heap()->undefined_value()));
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ And(at, a2, Operand(kSmiTagMask));
+ __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
}
-}
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(a0);
- __ push(a1);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(a1);
- __ pop(a0);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ GetObjectType(arg, scratch1, scratch1);
- __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
- __ mov(arg, scratch1);
- __ sw(arg, MemOperand(sp, stack_offset));
- __ bind(&done);
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
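
For reference, the debug assertions in the stub above encode the standard V8 tagged-value test: a smi has a clear low bit, and a heap object's first word is its map. A minimal C++ sketch of that check, with illustrative constants standing in for the real V8 definitions:

#include <cstdint>

// Illustrative stand-ins; the real values live in the V8 globals.
constexpr uintptr_t kSmiTagMaskSketch = 1;     // assumption: low bit distinguishes smis
constexpr uintptr_t kHeapObjectTagSketch = 1;  // assumption: heap pointers carry tag 1

bool IsHeapObjectSketch(uintptr_t value) {
  // The And/Assert(ne) pair above checks exactly this.
  return (value & kSmiTagMaskSketch) != 0;
}

bool HasExpectedMapSketch(uintptr_t object, uintptr_t expected_map) {
  // The map word sits at the start of every heap object (HeapObject::kMapOffset).
  return *reinterpret_cast<uintptr_t*>(object - kHeapObjectTagSketch) == expected_map;
}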
@@ -4934,9 +4095,9 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
@@ -5121,7 +4282,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5170,7 +4331,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
@@ -5215,7 +4376,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t loc =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(ra);
@@ -5290,7 +4451,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ MultiPush(spill_mask);
__ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ mov(at, v0);
__ MultiPop(spill_mask);
@@ -5369,7 +4530,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Move(a0, elements);
__ Move(a1, name);
}
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ mov(scratch2, a2);
__ mov(at, v0);
@@ -5477,16 +4638,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
// Hydrogen code stubs need stub2 at snapshot time.
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // FPU is a base requirement for V8.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -5553,7 +4709,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5566,13 +4722,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -5583,21 +4739,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
__ Move(a1, address);
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5756,8 +4903,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ lw(a1, MemOperand(fp, parameter_count_offset));
@@ -5771,27 +4918,9 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ mov(a1, v0);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ lw(a0, MemOperand(fp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ Subu(a0, a0, 1);
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(a0);
- __ InvokeFunction(
- a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ pop(ra);
@@ -5830,27 +4959,30 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
ASSERT(IsPowerOf2(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
-
+ __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
int32_t entry_hook =
- reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
- __ li(at, Operand(entry_hook));
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
+ __ li(t9, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter.
- __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ li(at, Operand(ExternalReference(&dispatcher,
+ __ li(t9, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
- masm->isolate())));
+ isolate())));
#endif
- __ Call(at);
+  // Call the C function through t9 to conform to the ABI for PIC code.
+ __ Call(t9);
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
__ mov(sp, s5);
+ } else {
+ __ Addu(sp, sp, kCArgsSlotsSize);
}
// Also pop ra to get Ret(0).
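
A note on the alignment code at the top of this stub: for a power-of-two alignment, AND-ing the stack pointer with the negated alignment rounds it down to an aligned boundary. A small sketch (the function name is illustrative):

#include <cassert>
#include <cstdint>

uintptr_t AlignDownSketch(uintptr_t sp, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // must be a power of two
  // In two's complement, -alignment == ~(alignment - 1), so the AND clears
  // exactly the low bits and rounds sp down, as "And(sp, sp, -frame_alignment)" does.
  return sp & ~(alignment - 1);
}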
@@ -5863,20 +4995,15 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- T stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
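
The rewritten loop above folds the per-kind comparison into a conditional tail call instead of emitting a branch-over label for every kind. Roughly, the generated code behaves like this unrolled dispatch; the helper names here are hypothetical:

#include <cstdlib>

// Hypothetical stand-ins for the real stub machinery.
int GetKindFromSequenceIndexSketch(int i);
[[noreturn]] void TailCallStubForKindSketch(int kind);

[[noreturn]] void DispatchOnKindSketch(int kind_in_a3, int last_index) {
  for (int i = 0; i <= last_index; ++i) {
    // Each unrolled step is "if (a3 == kind) tail-call stub(kind)".
    if (kind_in_a3 == GetKindFromSequenceIndexSketch(i)) {
      TailCallStubForKindSketch(kind_in_a3);
    }
  }
  std::abort();  // mirrors the Abort reached on an unexpected kind
}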
@@ -5889,7 +5016,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
@@ -5916,48 +5043,43 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
if (FLAG_debug_code) {
- __ lw(t1, FieldMemOperand(t1, 0));
+ __ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
}
// Save the resulting elements kind in type info. We can't just store a3
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ lw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
- Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(kind);
- __ TailCallStub(&stub);
- __ bind(&next);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq, a3, Operand(kind));
}
// If we reached this point there is a problem.
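
To see why the plain Addu above is safe: the elements kind occupies the low bits of transition_info, and the packed-to-holey delta never carries out of that field, so the unrelated upper payload bits survive the addition. A sketch with an illustrative mask value:

#include <cstdint>

constexpr uint32_t kElementsKindMaskSketch = 0x1f;  // assumption: kind lives in the low bits

uint32_t BumpPackedToHoleySketch(uint32_t transition_info, uint32_t delta) {
  // Precondition: (transition_info & kElementsKindMaskSketch) + delta still
  // fits inside the mask, so the addition cannot spill into the upper bits.
  return transition_info + delta;
}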
@@ -5970,20 +5092,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -6004,12 +5121,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -6045,48 +5162,35 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ SmiTst(a3, at);
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
- __ bind(&okay_here);
+    // We should either have undefined in a2 or a valid AllocationSite.
+ __ AssertUndefinedOrAllocationSite(a2, t0);
}
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&no_info, ne, t0, Operand(at));
- __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -6099,34 +5203,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
- __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
- InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lo, a0, Operand(1));
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ lw(at, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
}
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
}
@@ -6160,7 +5255,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// but the following bit field extraction takes care of that anyway.
__ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ DecodeField<Map::ElementsKindBits>(a3);
if (FLAG_debug_code) {
Label done;
@@ -6180,6 +5275,140 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : callee
+ // -- t0 : call_data
+ // -- a2 : holder
+ // -- a1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // Save context, callee and call data.
+ __ Push(context, callee, call_data);
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // Push return value and default return value.
+ __ Push(scratch, scratch);
+ __ li(scratch,
+ Operand(ExternalReference::isolate_address(isolate())));
+ // Push isolate and holder.
+ __ Push(scratch, holder);
+
+ // Prepare arguments.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
+ // a0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Addu(a0, sp, Operand(1 * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ sw(at, MemOperand(a0, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(at, Operand(argc));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ return_value_operand,
+ &context_restore_operand);
+}
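
The STATIC_ASSERTs near the top of this stub pin down the implicit-args layout that the Push sequence then builds from the highest index (context) down to the lowest (holder), leaving sp pointing at the holder slot. For reference, the asserted indices restated as a plain enum:

// Indices taken verbatim from the STATIC_ASSERTs above.
enum FunctionCallbackArgumentsIndexSketch {
  kHolderIndexSketch = 0,
  kIsolateIndexSketch = 1,
  kReturnValueDefaultValueIndexSketch = 2,
  kReturnValueOffsetSketch = 3,
  kDataIndexSketch = 4,
  kCalleeIndexSketch = 5,
  kContextSaveIndexSketch = 6,
  kArgsLengthSketch = 7,
};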
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- a2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = a2;
+
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // a1 (internal::Object** args_) as the data.
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
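
The "mov(a0, sp)" trick above works because a V8 handle is simply a pointer to a slot holding the object: sp points at the name slot, so its value already serves as a Handle<Name>. A minimal sketch with illustrative types:

struct ObjectSketch;

template <typename T>
struct HandleSketch {
  T** location;  // a handle is merely the address of a slot
};

HandleSketch<ObjectSketch> NameHandleFromStackSketch(ObjectSketch** stack_top) {
  // The stack pointer itself becomes the handle; nothing is copied.
  return HandleSketch<ObjectSketch>{stack_top};
}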
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/code-stubs-mips.h b/chromium/v8/src/mips/code-stubs-mips.h
index c3e05b8a2d4..3e0eaa160e0 100644
--- a/chromium/v8/src/mips/code-stubs-mips.h
+++ b/chromium/v8/src/mips/code-stubs-mips.h
@@ -1,34 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_
-#include "ic-inl.h"
+#include "src/ic-inl.h"
namespace v8 {
@@ -38,34 +15,10 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {}
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
@@ -82,50 +35,17 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
- bool ascii);
+ String::Encoding encoding);
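
The contract documented above (the dest register ends just past the last unit written) reduces to the following illustrative loop; the real helper emits MIPS code with the same post-condition for either encoding:

// Illustrative only; not the generated code.
char* CopyCharsSketch(char* dest, const char* src, int count) {
  while (count-- > 0) *dest++ = *src++;
  return dest;  // one past the last character written
}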
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register r0.
- // Contents of both c1 and c2 registers are modified. At the exit c1 is
- // guaranteed to contain halfword with low and high bytes equal to
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -144,47 +64,48 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
+};
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm);
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
- const StringAddFlags flags_;
+ void Generate(MacroAssembler* masm);
};
-
-class SubStringStub: public PlatformCodeStub {
+class RestoreRegistersStateStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+ : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+ static void GenerateAheadOfTime(Isolate* isolate);
private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
void Generate(MacroAssembler* masm);
};
-
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() { }
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compare two flat ASCII strings and returns result in v0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -225,11 +146,13 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
- WriteInt32ToHeapNumberStub(Register the_int,
+ WriteInt32ToHeapNumberStub(Isolate* isolate,
+ Register the_int,
Register the_heap_number,
Register scratch,
Register scratch2)
- : the_int_(the_int),
+ : PlatformCodeStub(isolate),
+ the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
sign_(scratch2) {
@@ -268,12 +191,14 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -419,7 +344,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
@@ -458,7 +383,7 @@ class RecordWriteStub: public PlatformCodeStub {
// moved by GC
class DirectCEntryStub: public PlatformCodeStub {
public:
- DirectCEntryStub() {}
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
@@ -474,7 +399,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate), mode_(mode) { }
void Generate(MacroAssembler* masm);
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 3a87c5af886..5d613d0fb0a 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -1,54 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-mips.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mips/simulator-mips.h"
namespace v8 {
namespace internal {
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
#define __ masm.
@@ -62,10 +27,10 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -79,21 +44,13 @@ UnaryMathFunction CreateExpFunction() {
Register temp2 = t1;
Register temp3 = t2;
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
+ __ MovFromFloatParameter(input);
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(v0, v1, result);
- }
+ __ MovToFloatResult(result);
__ Ret();
}
@@ -113,13 +70,564 @@ UnaryMathFunction CreateExpFunction() {
}
-#undef __
+#if defined(V8_HOST_ARCH_MIPS)
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+ return stub;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+ if (buffer == NULL) return stub;
+
+  // This code assumes that cache lines are 32 bytes; if the cache line is
+  // larger, it will not work correctly.
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ Label lastb, unaligned, aligned, chkw,
+ loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+ leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+ ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+ // The size of each prefetch.
+ uint32_t pref_chunk = 32;
+    // The maximum size of a prefetch; it must not be less than pref_chunk.
+    // If the real size of a prefetch is greater than max_pref_size and
+ // the kPrefHintPrepareForStore hint is used, the code will not work
+ // correctly.
+ uint32_t max_pref_size = 128;
+ ASSERT(pref_chunk < max_pref_size);
+
+ // pref_limit is set based on the fact that we never use an offset
+    // greater than 5 on a store pref and that a single pref can
+    // never be larger than max_pref_size.
+ uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
+ int32_t pref_hint_load = kPrefHintLoadStreamed;
+ int32_t pref_hint_store = kPrefHintPrepareForStore;
+ uint32_t loadstore_chunk = 4;
+
+ // The initial prefetches may fetch bytes that are before the buffer being
+    // copied. Start copies with an offset of 4 to avoid this situation when
+ // using kPrefHintPrepareForStore.
+ ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+ pref_chunk * 4 >= max_pref_size);
+
+ // If the size is less than 8, go to lastb. Regardless of size,
+    // copy the dst pointer to v0 for the return value.
+ __ slti(t2, a2, 2 * loadstore_chunk);
+ __ bne(t2, zero_reg, &lastb);
+ __ mov(v0, a0); // In delay slot.
+
+ // If src and dst have different alignments, go to unaligned, if they
+ // have the same alignment (but are not actually aligned) do a partial
+ // load/store to make them aligned. If they are both already aligned
+ // we can start copying at aligned.
+ __ xor_(t8, a1, a0);
+ __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
+ __ bne(t8, zero_reg, &unaligned);
+ __ subu(a3, zero_reg, a0); // In delay slot.
+
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &aligned); // Already aligned.
+    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swl(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+    // Now dst and src are both aligned to word boundaries. Set a2 to
+ // count how many bytes we have to copy after all the 64 byte chunks are
+ // copied and a3 to the dst pointer after all the 64 byte chunks have been
+ // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&aligned);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &chkw); // Less than 64?
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
+
+    // When in the loop we prefetch with the kPrefHintPrepareForStore hint,
+    // a0+x should be past the "t0-32" address. This means:
+ // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
+ // x=64 the last "safe" a0 address is "t0-96". In the current version we
+ // will use "pref hint, 128(a0)", so "t0-160" is the limit.
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2); // t0 is the "past the end" address.
+ __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+ __ bind(&loop16w);
+ __ lw(t0, MemOperand(a1));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
+ __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&skip_pref);
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+
+ __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+    // Here we have src and dest word-aligned but less than 64 bytes to go.
+    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
+ // down to chk1w to handle the tail end of the copy.
+ __ bind(&chkw);
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ andi(t8, a2, 0x1f);
+ __ beq(a2, t8, &chk1w); // Less than 32?
+ __ nop(); // In delay slot.
+ __ lw(t0, MemOperand(a1));
+ __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Here we have less than 32 bytes to copy. Set up for a loop to copy
+ // one word at a time. Set a2 to count how many bytes we have to copy
+ // after all the word chunks are copied and a3 to the dst pointer after
+ // all the word chunks have been copied. We will loop, incrementing a0
+    // and a1 until a0 equals a3.
+ __ bind(&chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &lastb);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ __ bind(&wordCopy_loop);
+ __ lw(t3, MemOperand(a1));
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &wordCopy_loop);
+ __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+
+ __ bind(&lastb);
+ __ Branch(&leave, le, a2, Operand(zero_reg));
+ __ addu(a3, a0, a2);
+
+ __ bind(&lastbloop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &lastbloop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ bind(&leave);
+ __ jr(ra);
+ __ nop();
+
+ // Unaligned case. Only the dst gets aligned so we need to do partial
+ // loads of the source followed by normal stores to the dst (once we
+ // have aligned the destination).
+ __ bind(&unaligned);
+ __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
+ __ beq(a3, zero_reg, &ua_chk16w);
+ __ subu(a2, a2, a3); // In delay slot.
+
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swl(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
+
+ // Now the dst (but not the source) is aligned. Set a2 to count how many
+ // bytes we have to copy after all the 64 byte chunks are copied and a3 to
+ // the dst pointer after all the 64 byte chunks have been copied. We will
+ // loop, incrementing a0 and a1 until a0 equals a3.
+ __ bind(&ua_chk16w);
+ __ andi(t8, a2, 0x3f);
+ __ beq(a2, t8, &ua_chkw);
+ __ subu(a3, a2, t8); // In delay slot.
+ __ addu(a3, a0, a3);
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ addu(t0, a0, a2);
+ __ Subu(t9, t0, pref_limit);
+ }
+
+ __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+ __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+ if (pref_hint_store != kPrefHintPrepareForStore) {
+ __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+ }
+
+ __ bind(&ua_loop16w);
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+ __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+ __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+ __ addiu(a0, a0, 16 * loadstore_chunk);
+ __ bne(a0, a3, &ua_loop16w);
+ __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
+ __ mov(a2, t8);
+
+    // Here we have less than 64 bytes to go. Check for
+    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
+ // ua_chk1w to handle the tail end of the copy.
+ __ bind(&ua_chkw);
+ __ Pref(pref_hint_load, MemOperand(a1));
+ __ andi(t8, a2, 0x1f);
+
+ __ beq(a2, t8, &ua_chk1w);
+ __ nop(); // In delay slot.
+ if (kArchEndian == kLittle) {
+ __ lwr(t0, MemOperand(a1));
+ __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(t0, MemOperand(a1));
+ __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
+ __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a1, a1, 8 * loadstore_chunk);
+ __ sw(t0, MemOperand(a0));
+ __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+ __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+ __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+ __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+ __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+ __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+ __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+ __ addiu(a0, a0, 8 * loadstore_chunk);
+
+ // Less than 32 bytes to copy. Set up for a loop to
+ // copy one word at a time.
+ __ bind(&ua_chk1w);
+ __ andi(a2, t8, loadstore_chunk - 1);
+ __ beq(a2, t8, &ua_smallCopy);
+ __ subu(a3, t8, a2); // In delay slot.
+ __ addu(a3, a0, a3);
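+    // a3 now marks the destination address at which the word-at-a-time
+    // loop below stops.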
+
+ __ bind(&ua_wordCopy_loop);
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ }
+ __ addiu(a0, a0, loadstore_chunk);
+ __ addiu(a1, a1, loadstore_chunk);
+ __ bne(a0, a3, &ua_wordCopy_loop);
+ __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
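+    // The store executes in the branch delay slot; a0 has already been
+    // advanced, so the word is written one chunk back.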
+
+    // Copy the remaining bytes (fewer than one word) one at a time.
+ __ bind(&ua_smallCopy);
+ __ beq(a2, zero_reg, &leave);
+ __ addu(a3, a0, a2); // In delay slot.
+
+ __ bind(&ua_smallCopy_loop);
+ __ lb(v1, MemOperand(a1));
+ __ addiu(a0, a0, 1);
+ __ addiu(a1, a1, 1);
+ __ bne(a0, a3, &ua_smallCopy_loop);
+ __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+
+ __ jr(ra);
+ __ nop();
+ }
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
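+
+// Emits a native stub that computes sqrt via sqrt_d; under the simulator
+// (or if executable memory cannot be allocated) it falls back to std::sqrt.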
UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ __ MovFromFloatParameter(f12);
+ __ sqrt_d(f0, f12);
+ __ MovToFloatResult(f0);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
}
+#undef __
+
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -290,8 +798,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
}
- __ sw(t0, MemOperand(t3)); // mantissa
- __ sw(t1, MemOperand(t3, kIntSize)); // exponent
+ __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
+ __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent
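+  // Register::kMantissaOffset and Register::kExponentOffset resolve to the
+  // correct word within the double for the target endianness, replacing the
+  // little-endian layout that was previously hard-coded here.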
__ Addu(t3, t3, kDoubleSize);
__ bind(&entry);
@@ -341,7 +849,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
// Prepare for conversion loop.
- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ __ Addu(t0, t0, Operand(
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag
+ + Register::kExponentOffset));
__ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
__ Addu(t2, t2, Operand(kHeapObjectTag));
__ sll(t1, t1, 1);
@@ -350,7 +860,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses.
   // a3: beginning of destination FixedArray element fields, not tagged
- // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t0: beginning of source FixedDoubleArray element fields, not tagged;
+  //     it points to the exponent word
// t1: end of destination FixedArray, not tagged
// t2: destination FixedArray
// t3: the-hole pointer
@@ -373,7 +884,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
// a2: new heap number
- __ lw(a0, MemOperand(t0, -12));
+  // Load the mantissa of the current element; t0 points to the exponent of
+  // the next element.
+ __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+ - Register::kExponentOffset - kDoubleSize)));
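+  // On little-endian targets this offset evaluates to 0 - 4 - 8 = -12,
+  // the constant that was previously hard-coded here.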
__ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
__ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
__ mov(a0, a3);
@@ -492,7 +1005,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
at, Operand(zero_reg));
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(at, result, Operand(kShortExternalStringMask));
__ Branch(call_runtime, ne, at, Operand(zero_reg));
__ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
@@ -578,8 +1091,8 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
__ sll(at, temp2, 3);
__ Addu(temp3, temp3, Operand(at));
- __ lw(temp2, MemOperand(temp3, 0));
- __ lw(temp3, MemOperand(temp3, kPointerSize));
+ __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
+ __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word loaded is placed in the lower-numbered register.
if (temp2.code() < temp3.code()) {
__ sll(at, temp1, 20);
@@ -591,11 +1104,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Move(double_scratch1, temp3, temp1);
}
__ mul_d(result, result, double_scratch1);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&zero);
__ Move(result, kDoubleRegZero);
- __ Branch(&done);
+ __ BranchShort(&done);
__ bind(&infinity);
__ ldc1(result, ExpConstant(2, temp3));
@@ -603,42 +1116,47 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ bind(&done);
}
-
+#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher.masm()->Addu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- initialized = true;
- }
- return byte_sequence;
+
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before the MIPS simulator ICache is set up.
+ SmartPointer<CodePatcher> patcher(
+ new CodePatcher(young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->Push(ra, fp, cp, a1);
+ patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ patcher->masm()->Addu(
+ fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -654,10 +1172,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
diff --git a/chromium/v8/src/mips/codegen-mips.h b/chromium/v8/src/mips/codegen-mips.h
index 822b94ad799..82a410ec235 100644
--- a/chromium/v8/src/mips/codegen-mips.h
+++ b/chromium/v8/src/mips/codegen-mips.h
@@ -1,85 +1,21 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/chromium/v8/src/mips/constants-mips.cc b/chromium/v8/src/mips/constants-mips.cc
index 2dd7a31f388..f14992719db 100644
--- a/chromium/v8/src/mips/constants-mips.cc
+++ b/chromium/v8/src/mips/constants-mips.cc
@@ -1,35 +1,12 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "constants-mips.h"
+#include "src/mips/constants-mips.h"
namespace v8 {
namespace internal {
@@ -174,7 +151,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return true;
default:
return false;
- };
+ }
break;
case SPECIAL:
switch (FunctionFieldRaw()) {
@@ -183,11 +160,11 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return true;
default:
return false;
- };
+ }
break;
default:
return false;
- };
+ }
}
@@ -203,17 +180,17 @@ bool Instruction::IsLinkingInstruction() const {
return true;
default:
return false;
- };
+ }
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
return true;
default:
return false;
- };
+ }
default:
return false;
- };
+ }
}
@@ -232,7 +209,7 @@ bool Instruction::IsTrap() const {
return true;
default:
return false;
- };
+ }
}
}
@@ -278,7 +255,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case SPECIAL2:
switch (FunctionFieldRaw()) {
@@ -287,7 +264,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case SPECIAL3:
switch (FunctionFieldRaw()) {
@@ -296,7 +273,7 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
default:
return kUnsupported;
- };
+ }
break;
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
@@ -304,7 +281,7 @@ Instruction::Type Instruction::InstructionType() const {
return kImmediateType;
default:
return kRegisterType;
- };
+ }
break;
case COP1X:
return kRegisterType;
@@ -349,7 +326,7 @@ Instruction::Type Instruction::InstructionType() const {
return kJumpType;
default:
return kUnsupported;
- };
+ }
return kUnsupported;
}
diff --git a/chromium/v8/src/mips/constants-mips.h b/chromium/v8/src/mips/constants-mips.h
index 5a0870fd218..fc64f7dbbf9 100644
--- a/chromium/v8/src/mips/constants-mips.h
+++ b/chromium/v8/src/mips/constants-mips.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
@@ -55,6 +32,18 @@ enum ArchVariants {
static const ArchVariants kArchVariant = kMips32r1;
#endif
+enum Endianness {
+ kLittle,
+ kBig
+};
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
@@ -69,6 +58,15 @@ const bool IsMipsSoftFloatABI = true;
const bool IsMipsSoftFloatABI = true;
#endif
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 4;
+const uint32_t kHoleNanLower32Offset = 0;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 0;
+const uint32_t kHoleNanLower32Offset = 4;
+#else
+#error Unknown endianness
+#endif
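+// These offsets locate the upper and lower 32-bit words of a stored double,
+// so code that inspects the hole NaN reads the correct word on either
+// endianness.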
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
@@ -124,6 +122,16 @@ const uint32_t kFCSRFlagMask =
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
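+// These values follow the standard MIPS32 encoding of the PREF hint field.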
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -297,6 +305,8 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
+ PREF = ((6 << 3) + 3) << kOpcodeShift,
+
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
@@ -494,7 +504,8 @@ inline Condition NegateCondition(Condition cc) {
}
-inline Condition ReverseCondition(Condition cc) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case Uless:
return Ugreater;
@@ -514,7 +525,7 @@ inline Condition ReverseCondition(Condition cc) {
return greater_equal;
default:
return cc;
- };
+ }
}
diff --git a/chromium/v8/src/mips/cpu-mips.cc b/chromium/v8/src/mips/cpu-mips.cc
index 49d0b377ebc..ce471265bb2 100644
--- a/chromium/v8/src/mips/cpu-mips.cc
+++ b/chromium/v8/src/mips/cpu-mips.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU-specific code for MIPS, independent of the OS, goes here.
@@ -34,29 +11,19 @@
#include <asm/cachectl.h>
#endif // #ifdef __mips
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "cpu.h"
-#include "macro-assembler.h"
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
-#include "simulator.h" // For cache flushing.
+#include "src/simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(FPU);
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// Nothing to do, flushing no instructions.
if (size == 0) {
diff --git a/chromium/v8/src/mips/debug-mips.cc b/chromium/v8/src/mips/debug-mips.cc
index 1535231dd81..fc052114233 100644
--- a/chromium/v8/src/mips/debug-mips.cc
+++ b/chromium/v8/src/mips/debug-mips.cc
@@ -1,44 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "debug.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -58,9 +33,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
// li and Call pseudo-instructions emit two instructions each.
- patcher.masm()->li(v8::internal::t9,
- Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry())));
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())));
patcher.masm()->Call(v8::internal::t9);
patcher.masm()->nop();
patcher.masm()->nop();
@@ -105,7 +79,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry())));
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
@@ -116,8 +90,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
#define __ ACCESS_MASM(masm)
@@ -156,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ PrepareCEntryArgs(0); // No arguments.
__ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -181,14 +153,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
- __ li(t9, Operand(
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ li(t9, Operand(after_break_target));
__ lw(t9, MemOperand(t9));
__ Jump(t9);
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- a3 : slot in feedback array (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-mips.cc).
// ----------- S t a t e -------------
// -- a2 : name
@@ -202,7 +185,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-mips.cc).
// ----------- S t a t e -------------
// -- a0 : value
@@ -216,7 +199,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -225,7 +208,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -235,7 +218,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- a0 : value
@@ -244,16 +227,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-mips.cc).
- // ----------- S t a t e -------------
- // -- a2: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
@@ -261,7 +235,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
@@ -270,17 +244,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- a2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
@@ -290,18 +254,20 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
  // Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
@@ -316,30 +282,27 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
-const bool Debug::kFrameDropperSupported = false;
+const bool LiveEdit::kFrameDropperSupported = false;
#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/deoptimizer-mips.cc b/chromium/v8/src/mips/deoptimizer-mips.cc
index 0662b17366b..71c82fb8af7 100644
--- a/chromium/v8/src/mips/deoptimizer-mips.cc
+++ b/chromium/v8/src/mips/deoptimizer-mips.cc
@@ -1,37 +1,14 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -49,13 +26,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
+ }
+ }
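+
+  // With --zap-code-space, the break instructions planted above turn any
+  // accidental re-entry into the deoptimized code (including through the
+  // OSR entry) into an immediate trap.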
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -125,11 +125,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
#define __ masm()->
@@ -239,13 +234,13 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
- __ Branch(&pop_loop_header);
+ __ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
__ addiu(a3, a3, sizeof(uint32_t));
__ bind(&pop_loop_header);
- __ Branch(&pop_loop, ne, a2, Operand(sp));
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -280,11 +275,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
__ bind(&inner_loop_header);
- __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Addu(t0, t0, Operand(kPointerSize));
__ bind(&outer_loop_header);
- __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+ __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
@@ -371,6 +366,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/chromium/v8/src/mips/disasm-mips.cc b/chromium/v8/src/mips/disasm-mips.cc
index 691df940f2d..82a47582ad3 100644
--- a/chromium/v8/src/mips/disasm-mips.cc
+++ b/chromium/v8/src/mips/disasm-mips.cc
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
@@ -51,14 +28,14 @@
#include <stdarg.h>
#include <string.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "mips/constants-mips.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
+#include "src/mips/constants-mips.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
@@ -207,21 +184,21 @@ void Decoder::PrintFd(Instruction* instr) {
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
// Print the integer value of the rd field, when used as 'ext' size.
void Decoder::PrintSs1(Instruction* instr) {
int ss = instr->RdValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
}
@@ -230,49 +207,49 @@ void Decoder::PrintSs2(Instruction* instr) {
int ss = instr->RdValue();
int pos = instr->SaValue();
out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
}
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
}
// Print the integer value of the cc field for the FP compare instructions.
void Decoder::PrintCc(Instruction* instr) {
int cc = instr->FCccValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t imm = instr->Imm26Value() << kImmFieldShift;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
@@ -283,8 +260,8 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
break;
}
case TGE:
@@ -295,12 +272,12 @@ void Decoder::PrintCode(Instruction* instr) {
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
- OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
break;
- };
+ }
}
@@ -430,7 +407,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintCc(instr);
return 2;
}
- };
+ }
UNREACHABLE();
return -1;
}
@@ -626,7 +603,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@@ -819,7 +796,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
@@ -899,6 +876,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case LWR:
Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
+ case PREF:
+ Format(instr, "pref 'rt, 'imm16s('rs)");
+ break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
@@ -929,7 +909,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
default:
UNREACHABLE();
break;
- };
+ }
}
@@ -951,9 +931,9 @@ void Decoder::DecodeTypeJump(Instruction* instr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
@@ -985,7 +965,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
diff --git a/chromium/v8/src/mips/frames-mips.cc b/chromium/v8/src/mips/frames-mips.cc
index 1bd511654a1..5da00801c77 100644
--- a/chromium/v8/src/mips/frames-mips.cc
+++ b/chromium/v8/src/mips/frames-mips.cc
@@ -1,39 +1,15 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "assembler.h"
-#include "assembler-mips.h"
-#include "assembler-mips-inl.h"
-#include "frames.h"
+#include "src/assembler.h"
+#include "src/mips/assembler-mips.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -41,10 +17,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/frames-mips.h b/chromium/v8/src/mips/frames-mips.h
index 55951b58c47..5666f642f99 100644
--- a/chromium/v8/src/mips/frames-mips.h
+++ b/chromium/v8/src/mips/frames-mips.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
@@ -110,8 +87,6 @@ const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
const int kUndefIndex = -1;
// Map with indexes on stack that corresponds to codes of saved registers.
const int kSafepointRegisterStackIndexMap[kNumRegs] = {
@@ -161,12 +136,9 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // See some explanation in MacroAssembler::EnterExitFrame.
- // This marks the top of the extra allocated stack space.
- static const int kStackSpaceOffset = -3 * kPointerSize;
+ static const int kFrameSize = 2 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
-
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
@@ -179,6 +151,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
};
diff --git a/chromium/v8/src/mips/full-codegen-mips.cc b/chromium/v8/src/mips/full-codegen-mips.cc
index 3ce2ab5f19c..41acad355f9 100644
--- a/chromium/v8/src/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/mips/full-codegen-mips.cc
@@ -1,31 +1,8 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
@@ -37,18 +14,18 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
-#include "mips/code-stubs-mips.h"
-#include "mips/macro-assembler-mips.h"
+#include "src/mips/code-stubs-mips.h"
+#include "src/mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -84,7 +61,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Always taken before patched.
- __ Branch(target, eq, at, Operand(zero_reg));
+ __ BranchShort(target, eq, at, Operand(zero_reg));
}
// When initially emitting this ensure that a jump is never generated to skip
@@ -95,7 +72,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ bind(&patch_site_);
__ andi(at, reg, 0);
// Never taken before patched.
- __ Branch(target, ne, at, Operand(zero_reg));
+ __ BranchShort(target, ne, at, Operand(zero_reg));
}
void EmitPatchInfo() {
@@ -138,6 +115,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -152,16 +130,21 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). t1 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ lw(at, MemOperand(sp, receiver_offset));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
@@ -171,7 +154,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -179,21 +162,35 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ Label ok;
+ __ Subu(t5, sp, Operand(locals_count * kPointerSize));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, t5, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ li(a2, Operand(locals_count));
- __ bind(&loop);
- __ Subu(a2, a2, 1);
- __ push(t5);
- __ Branch(&loop, gt, a2, Operand(zero_reg));
- } else {
- for (int i = 0; i < locals_count; i++) {
- __ push(t5);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ li(a2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ __ Subu(sp, sp, Operand(kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
+ // Continue loop if not done.
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ __ Subu(sp, sp, Operand(remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
}
}
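The division above splits initialization between a counted loop and an unrolled tail. A worked example with hypothetical counts:

    // locals_count = 70, FLAG_optimize_for_size off => kMaxPushes = 32
    // loop_iterations = 70 / 32 = 2  -> 64 slots written inside the loop
    // remaining       = 70 % 32 = 6  -> 6 slots written by the unrolled tail
    // Total adjustment: 70 * kPointerSize, one undefined store per local.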
@@ -205,20 +202,25 @@ void FullCodeGenerator::Generate() {
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
- __ push(a1);
+ bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ push(a1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -233,8 +235,15 @@ void FullCodeGenerator::Generate() {
__ sw(a0, target);
// Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
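The need_write_barrier flag encodes the generational invariant the barrier exists for: only stores that may create an old-to-new pointer need a remembered-set update. In sketch form:

    // store into object known to be in new space -> no barrier needed
    // store into possibly-old object             -> RecordWriteContextSlot
    // FastNewContextStub allocates in new space, hence the skip; the
    // FLAG_debug_code path asserts that assumption instead of trusting it.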
@@ -262,14 +271,14 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, v0, a1, a2);
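The stub selection maps directly onto the three arguments-object kinds (summary of the branch above, using the enum names from this diff; the aliasing notes are the usual rationale, not text from the patch):

    // strict function               -> NEW_STRICT      (no parameter aliasing)
    // sloppy, duplicate parameters  -> NEW_SLOPPY_SLOW (fast aliasing invalid)
    // sloppy, unique parameters     -> NEW_SLOPPY_FAST (aliased mapped args)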
@@ -293,7 +302,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -303,9 +312,12 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+ PredictableCodeSizeScope predictable(masm_,
+ masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+ __ Call(stack_check, RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,11 +353,7 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
+ if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
@@ -365,13 +373,10 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
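The now-unconditional weight computation scales the interrupt-budget decrement with the loop body's code size. A standalone C++ sketch of the formula (constant values are placeholders, not this port's actual tuning):

    static int BackEdgeWeight(int distance, int multiplier, int max_weight) {
      // Bigger loop bodies burn the profiling budget faster, so heavier
      // loops hit the interrupt/optimization check sooner.
      int weight = distance / multiplier;
      if (weight < 1) weight = 1;                    // always make progress
      if (weight > max_weight) weight = max_weight;  // clamp to the cap
      return weight;
    }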
@@ -404,32 +409,24 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(a2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ __ push(v0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ pop(v0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -686,7 +683,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
+ CallIC(ic, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -809,7 +806,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -859,7 +856,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
__ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -915,7 +912,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -987,7 +984,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -995,7 +992,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1051,9 +1048,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
+ Label skip;
+ __ Branch(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&next_test, ne, v0, Operand(at));
+ __ Drop(1);
+ __ Branch(clause->body_target());
+ __ bind(&skip);
+
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
__ Branch(clause->body_target());
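The island between Branch(&skip) and bind(&skip) is unreachable on the normal path; it appears to serve as the continuation point when optimized code deoptimizes at the clause comparison, where v0 holds a materialized boolean rather than the raw CompareIC result. In sketch form:

    // deopt lands here with v0 = true/false (TOS_REG):
    //   if (v0 != true) goto next_test;
    //   drop the switch value; goto clause body;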
@@ -1085,6 +1091,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1157,10 +1164,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(v0); // Map.
__ li(a0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(a2, a1, a0);
+ // Push map, enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(v0, a2, a1, a0);
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1171,13 +1177,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ li(a1, cell);
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+ __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
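The megamorphic sentinel is now written straight into the for-in slot of the shared feedback vector, replacing the per-site Cell that the deleted lines allocated. For reference, the offset expression resolves as follows, assuming the usual FixedArray layout:

    // FieldMemOperand(vec, FixedArray::OffsetOfElementAt(slot))
    //   == vec + FixedArray::kHeaderSize + slot * kPointerSize - kHeapObjectTag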
@@ -1225,8 +1227,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
- __ push(a1); // Enumerable.
- __ push(a3); // Current entry.
+ __ Push(a1, a3); // Enumerable and current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ mov(a3, result_register());
__ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
@@ -1271,8 +1272,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
__ mov(a0, v0);
// As with for-in, skip the loop if the iterator is null or undefined.
@@ -1281,17 +1282,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(loop_statement.break_label(), eq, a0, Operand(at));
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(a0, &convert);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ bind(&convert);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0);
- __ bind(&done_convert);
- __ push(a0);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1338,7 +1330,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1346,7 +1340,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(v0);
}
@@ -1368,7 +1362,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1380,7 +1374,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1405,11 +1399,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1422,7 +1415,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1458,19 +1451,18 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ Branch(done);
@@ -1487,13 +1479,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallLoadIC(CONTEXTUAL);
context()->Plug(v0);
break;
}
@@ -1501,9 +1492,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1535,7 +1525,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have a valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1544,18 +1534,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
@@ -1568,15 +1558,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1608,7 +1598,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ li(a2, Operand(expr->pattern()));
__ li(a1, Operand(expr->flags()));
__ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(t1, v0);
__ bind(&materialized);
@@ -1620,7 +1610,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(t1);
__ bind(&allocated);
@@ -1661,14 +1651,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1705,10 +1694,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1798,8 +1784,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
@@ -1811,31 +1796,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
- 1, a1, a2);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1869,7 +1835,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
__ li(a3, Operand(Smi::FromInt(i)));
__ mov(a0, result_register());
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1885,13 +1851,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2030,7 +1992,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2070,9 +2032,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
__ mov(a0, v0);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(a3, a0); // iter, exception
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "throw", iter, except
__ jmp(&l_call);
// try { received = %yield result }
@@ -2098,33 +2060,41 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(v0); // result
+ __ pop(v0); // result
EmitReturnSequence();
__ mov(a0, v0);
- __ bind(&l_resume); // received in a0
+ __ bind(&l_resume); // received in a0
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
- __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(a3, a0); // iter, received
+ __ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(a2, a3, a0); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ mov(a0, v0);
+ __ mov(a1, a0);
+ __ sw(a1, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
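Per the three-register Push above, the stack at l_call looks like this (derived from the diff; kPointerSize taken as 4 on MIPS32):

    // sp[0] : received/exception  (the single call argument)
    // sp[4] : iter                (receiver; also loaded into a1)
    // sp[8] : "next"/"throw"      (key; overwritten with the loaded method,
    //                              then removed by the Drop(1) after the call)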
// if (!result.done) goto l_try;
__ bind(&l_loop);
__ mov(a0, v0);
__ push(a0); // save result
__ LoadRoot(a2, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in v0
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2133,8 +2103,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// result.value
__ pop(a0); // result
__ LoadRoot(a2, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in v0
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in v0
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
@@ -2146,18 +2115,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in a0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. a1
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
__ pop(a1);
// Check generator state.
- Label wrong_state, done;
+ Label wrong_state, closed_state, done;
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
- __ Branch(&wrong_state, le, a3, Operand(zero_reg));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ Branch(&closed_state, eq, a3, Operand(zero_reg));
+ __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
// Load suspended function and context.
__ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
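The tightened STATIC_ASSERTs pin down the continuation encoding that the two new branches rely on:

    // continuation <  0 : generator is currently executing -> wrong_state
    // continuation == 0 : generator is closed              -> closed_state
    // continuation >  0 : suspend point to resume at       -> fall through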
@@ -2226,14 +2197,29 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(a0);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(a1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2244,14 +2230,14 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ lw(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2282,8 +2268,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
// Call load IC. It has arguments receiver and property name a0 and a2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2292,7 +2277,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2319,9 +2304,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2330,13 +2314,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// recording binary operation stub.
switch (op) {
case Token::SAR:
- __ Branch(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(right, left, scratch1);
__ And(v0, right, Operand(~kSmiTagMask));
break;
case Token::SHL: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ sllv(scratch1, scratch1, scratch2);
@@ -2346,7 +2328,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::SHR: {
- __ Branch(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ srlv(scratch1, scratch1, scratch2);
@@ -2401,22 +2382,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpICStub stub(op, mode);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2442,10 +2417,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2454,7 +2426,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
__ Pop(a0, a2); // a0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2465,49 +2437,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
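These two helpers fold together the store-plus-write-barrier and the runtime lookup-slot store patterns that the branches below previously spelled out inline. Typical call shapes, taken from those branches:

    // MemOperand location = VarOperand(var, a1);
    // EmitStoreToStackLocalOrContextSlot(var, location);    // stack/context
    // EmitCallStoreContextSlot(var->name(), strict_mode()); // lookup slots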
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallStoreIC();
- } else if (op == Token::INIT_CONST) {
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ li(a0, Operand(var->name()));
+ __ Push(v0, cp, a0); // Context and name.
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(v0);
- __ li(a0, Operand(var->name()));
- __ Push(cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2517,23 +2498,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2541,24 +2518,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(v0); // Value.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, a1, a0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
@@ -2566,7 +2529,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
@@ -2574,10 +2537,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2597,10 +2557,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ mov(a0, result_register());
__ Pop(a2, a1); // a1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2627,73 +2587,70 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId id) {
ic_total_count_++;
- __ Call(code, rmode, id);
+ __ Call(code, RelocInfo::CODE_TARGET, id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ li(a2, Operand(name));
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(v0, MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(v0);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(a1);
- __ push(v0);
- __ push(a1);
+ Expression* callee = expr->expression();
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ lw(a1, MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2701,20 +2658,17 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Record call targets.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
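The new CallIC convention replaces the per-call-site Cell with an index into the function's feedback vector. As used above (sketch; presumably the stub recovers the vector itself from the callee, since full codegen no longer loads it here):

    // a1 : target function, loaded from the stack
    // a3 : Smi-tagged feedback-vector slot for this call site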
@@ -2734,15 +2688,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
- // t0: the language mode.
- __ li(t0, Operand(Smi::FromInt(language_mode())));
+ // t0: the strict mode.
+ __ li(t0, Operand(Smi::FromInt(strict_mode())));
// a1: the start position of the scope the calls resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2755,12 +2709,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2789,20 +2742,18 @@ void FullCodeGenerator::VisitCall(Call* expr) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ lw(a0, GlobalObjectOperand());
- __ push(a0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2817,7 +2768,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2831,37 +2782,34 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(v0);
// The receiver is implicitly the global receiver. Indicate this
// by passing undefined to the call function stub.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
__ bind(&call);
}
// The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ lw(a1, GlobalObjectOperand());
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ push(a1);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2898,14 +2846,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}
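With FLAG_pretenuring_call_new, each 'new' site reserves two consecutive vector slots, exactly as the ASSERT above requires:

    // vector[CallNewFeedbackSlot()]     : constructor-target feedback
    // vector[CallNewFeedbackSlot() + 1] : AllocationSite for pretenuring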
@@ -3023,8 +2974,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3079,7 +3030,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
__ mov(a2, t0);
- __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(t1, a3, kPointerSizeLog2);
__ Addu(a2, a2, t1);
// Loop through all the keys in the descriptor array. If one of these is the
@@ -3280,7 +3231,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3367,31 +3318,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3404,7 +3333,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3477,7 +3406,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(v0);
}
@@ -3498,9 +3427,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value, at);
- __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
__ SmiTst(index, at);
- __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
Register scratch = t5;
@@ -3535,9 +3464,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
if (FLAG_debug_code) {
__ SmiTst(value, at);
- __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
__ SmiTst(index, at);
- __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
Register scratch = t5;
@@ -3563,7 +3492,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3606,7 +3535,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ mov(a0, result_register());
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3730,21 +3659,13 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // NewStringAddStub requires args in a0, a1.
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ pop(a1);
+ __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -3756,35 +3677,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3804,8 +3702,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3819,12 +3716,15 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ mov(a0, result_register());
+ __ pop(a1);
+ __ pop(a2);
__ CallStub(&stub);
context()->Plug(v0);
}
@@ -3877,50 +3777,13 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = v0;
- Register left = a1;
- Register tmp = a2;
- Register tmp2 = a3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
- __ pop(left);
-
- Label done, fail, ok;
- __ Branch(&ok, eq, left, Operand(right));
- // Fail if either is a non-HeapObject.
- __ And(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
- __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ Branch(&fail, ne, tmp, Operand(tmp2));
- __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ Branch(&ok, eq, tmp, Operand(tmp2));
- __ bind(&fail);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4195,8 +4058,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4204,34 +4067,48 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
+ // Push the builtins object as the receiver.
__ lw(a0, GlobalObjectOperand());
__ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
__ push(a0);
- }
+ // Load the function from the receiver.
+ __ li(a2, Operand(expr->name()));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the target function under the receiver.
+ __ lw(at, MemOperand(sp, 0));
+ __ push(at);
+ __ sw(v0, MemOperand(sp, kPointerSize));
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ li(a2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, v0);
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
}
- context()->Plug(v0);
}
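The rewritten JS-runtime path stages the call as receiver, then target, then arguments: it pushes the builtins object, lets the LoadIC leave the function in v0, and tucks that function under the receiver before CallFunctionStub runs. A minimal C++ sketch of the stack shuffle (the Value type and helper name are illustrative, not from this patch):

    #include <vector>

    struct Value { const void* payload; };

    // Mirrors the lw/push/sw sequence: duplicate the receiver on top of the
    // stack, then overwrite the slot it used to occupy with the IC's result.
    void PushTargetUnderReceiver(std::vector<Value>* stack, Value target) {
      Value receiver = stack->back();        // __ lw(at, MemOperand(sp, 0))
      stack->push_back(receiver);            // __ push(at)
      (*stack)[stack->size() - 2] = target;  // __ sw(v0, MemOperand(sp, kPointerSize))
    }

After the shuffle the layout is ..., target, receiver, the arguments go on top, and the target is reloaded from (arg_count + 1) slots down, which is what CallFunctionStub expects.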
@@ -4245,9 +4122,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ li(a1, Operand(Smi::FromInt(strict_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4255,11 +4130,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ li(a0, Operand(Smi::FromInt(SLOPPY)));
__ Push(a2, a1, a0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4273,7 +4148,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(v0);
}
} else {
@@ -4348,16 +4223,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4443,7 +4313,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ jmp(&stub_call);
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4473,10 +4343,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4506,10 +4374,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4523,10 +4388,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
__ Pop(a2, a1); // a1 = key, a2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4546,16 +4411,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4565,7 +4430,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ li(a0, Operand(proxy->name()));
__ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4591,12 +4456,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(v0, if_true);
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
@@ -4605,20 +4471,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, v0, a1);
Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
@@ -4627,14 +4493,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(v0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1);
__ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -4686,7 +4552,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
@@ -4712,7 +4578,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4746,7 +4612,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/chromium/v8/src/mips/ic-mips.cc b/chromium/v8/src/mips/ic-mips.cc
index 4c1ddbd5caf..834135cc031 100644
--- a/chromium/v8/src/mips/ic-mips.cc
+++ b/chromium/v8/src/mips/ic-mips.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "codegen.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/code-stubs.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -100,7 +77,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
+// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
@@ -229,7 +206,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
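Folding Map::kIsAccessCheckNeeded and the interceptor bit into one mask lets a single And/Branch cover both slow-path conditions. A small C++ sketch of the test (the bit position below is a placeholder; the real ones come from Map's bit-field layout):

    #include <cstdint>

    constexpr int kIsAccessCheckNeeded = 1;  // placeholder bit position

    // True when the receiver needs an access check or has the relevant
    // (named or indexed) interceptor; either condition forces the slow path.
    bool NeedsSlowCase(uint8_t bit_field, int interceptor_bit) {
      uint8_t mask = (1 << kIsAccessCheckNeeded) | (1 << interceptor_bit);
      return (bit_field & mask) != 0;
    }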
@@ -338,314 +316,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a1 : receiver
- // -- a2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(a1, &number, t1);
- __ GetObjectType(a1, a3, a3);
- __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, a1);
- __ Branch(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&boolean, eq, a1, Operand(t0));
- __ LoadRoot(t1, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t1));
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // a1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(a1, miss);
-
- // Check that the value is a JSFunction.
- __ GetObjectType(a1, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
-
- // a0: elements
- // Search the dictionary - put result in register a1.
- GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
-
- GenerateFunctionTailCall(masm, argc, &miss, t0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
- }
-
- // Get the receiver of the function from the stack.
- __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
-
- // Call the entry.
- __ PrepareCEntryArgs(2);
- __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ JumpIfSmi(a2, &invoke);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(a1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into a1.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(a2, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
-
- __ bind(&do_call);
- // receiver in a1 is not used after this point.
- // a2: key
- // a1: function
-
- GenerateFunctionTailCall(masm, argc, &slow_call, a0);
-
- __ bind(&check_number_dictionary);
- // a2: key
- // a3: elements map
- // t0: elements pointer
- // Check whether the elements is a number dictionary.
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow_load, ne, a3, Operand(at));
- __ sra(a0, a2, kSmiTagSize);
- // a0: untagged index
- __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a2, a1, a2); // Save the key and pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- }
- __ mov(a1, v0);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
- __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
-
- GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub,
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(a3, a2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(a2, &miss);
- __ IsObjectNameType(a2, a0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -654,9 +324,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -671,14 +339,18 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- a0 : receiver
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
// a1: elements
- GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+ GenerateDictionaryLoad(masm, &slow, a1, a2, v0, a3, t0);
__ Ret();
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -726,6 +398,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -739,10 +413,11 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1,
scratch2,
- Heap::kNonStrictArgumentsElementsMapRootIndex,
+ arguments_map,
slow_case,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
@@ -805,7 +480,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- a0 : key
@@ -830,7 +505,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -865,32 +540,6 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
- __ lw(a1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
- __ lw(a1, unmapped_location);
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow, eq, a1, Operand(a3));
- GenerateFunctionTailCall(masm, argc, &slow, a3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
@@ -982,7 +631,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -1135,7 +784,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1250,7 +899,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ Addu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+ Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
- kHeapObjectTag));
__ sll(at, key, kPointerSizeLog2);
__ addu(address, address, at);
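The hole in a FixedDoubleArray is one designated NaN bit pattern, so only its upper 32 bits need to be loaded and compared; that is what addressing the element at kHoleNanUpper32Offset sets up. A hedged C++ sketch (the pattern value below is an assumed placeholder; the real constant is defined alongside kHoleNanUpper32Offset):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed placeholder

    // The hole's upper word is chosen so no computed NaN shares it, so
    // comparing the upper 32 bits alone suffices (and needs only one load).
    bool IsHoleNan(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }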
@@ -1327,7 +976,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1513,8 +1162,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1523,9 +1171,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1573,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/mips/lithium-codegen-mips.cc
index 423ff9f5058..5edca6a3919 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/mips/lithium-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#include "src/v8.h"
-#include "mips/lithium-codegen-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+#include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -84,17 +84,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -148,24 +139,34 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ lw(a2, MemOperand(sp, receiver_offset));
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ lw(a2, GlobalObjectOperand());
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
+
__ bind(&ok);
}
}
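The prologue now encodes the receiver rule directly: only sloppy-mode, non-native functions that actually use `this` get an undefined receiver rewritten to the global proxy. A compact sketch of the rule (types and names are illustrative):

    // Strict-mode and native functions keep whatever receiver they were
    // called with; sloppy functions see the global proxy instead of
    // undefined.
    template <typename Object>
    Object* PatchReceiver(Object* receiver, bool is_sloppy, bool is_native,
                          Object* undefined_value, Object* global_proxy) {
      if (is_sloppy && !is_native && receiver == undefined_value) {
        return global_proxy;
      }
      return receiver;
    }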
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -175,8 +176,7 @@ bool LCodeGen::GeneratePrologue() {
if (slots > 0) {
if (FLAG_debug_code) {
__ Subu(sp, sp, Operand(slots * kPointerSize));
- __ push(a0);
- __ push(a1);
+ __ Push(a0, a1);
__ Addu(a0, sp, Operand(slots * kPointerSize));
__ li(a1, Operand(kSlotsZapValue));
Label loop;
@@ -184,8 +184,7 @@ bool LCodeGen::GeneratePrologue() {
__ Subu(a0, a0, Operand(kPointerSize));
__ sw(a1, MemOperand(a0, 2 * kPointerSize));
__ Branch(&loop, ne, a0, Operand(sp));
- __ pop(a1);
- __ pop(a0);
+ __ Pop(a0, a1);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
@@ -199,18 +198,22 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
- __ push(a1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both v0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Context is returned in v0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mov(cp, v0);
+ __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -224,8 +227,15 @@ bool LCodeGen::GeneratePrologue() {
MemOperand target = ContextOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier. This clobbers a3 and a0.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, a0, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -257,6 +267,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -271,7 +284,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -404,7 +418,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ li(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ lw(scratch, ToMemOperand(op));
return scratch;
}
@@ -440,7 +454,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
__ ldc1(dbl_scratch, mem_op);
return dbl_scratch;
@@ -658,10 +672,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -695,7 +705,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -742,6 +751,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -854,46 +864,24 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1064,31 +1052,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ lw(a0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1102,208 +1078,218 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
// Note: The code below even works when the divisor contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
+ __ subu(dividend, zero_reg, dividend);
+ __ And(dividend, dividend, Operand(mask));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
}
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(dividend, zero_reg, dividend);
+ }
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
- } else {
- const Register scratch = scratch0();
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
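In C++ terms, DoModByPowerOf2I builds the mask |divisor| - 1 as -(divisor + 1), equivalently ~divisor, for negative divisors, which stays correct even when the divisor is kMinInt, and gives the result the dividend's sign. A sketch using unsigned negation so that dividend == INT32_MIN is well-defined in C++, as it is for the hardware subu:

    #include <cstdint>

    int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
      // ~d == -(d + 1): yields 0x7FFFFFFF for divisor == INT32_MIN, where
      // plain negation would overflow.
      uint32_t mask = divisor < 0 ? ~static_cast<uint32_t>(divisor)
                                  : static_cast<uint32_t>(divisor) - 1;
      if (dividend < 0) {
        uint32_t r = (0u - static_cast<uint32_t>(dividend)) & mask;
        return -static_cast<int32_t>(r);  // remainder keeps dividend's sign
      }
      return dividend & mask;
    }

The generated code additionally deoptimizes when the masked negative dividend is zero and kBailoutOnMinusZero is set, since an int32 cannot represent the -0 that JS semantics would require.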
- // div runs in the background while we check for special cases.
- Register right_reg = EmitLoadRegister(instr->right(), scratch);
- __ div(left_reg, right_reg);
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (right->CanBeZero()) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for kMinInt % -1, we have to deopt if we care about -0, because we
- // can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
- // TODO(svenpanne) Don't deopt when we don't care about -0.
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Mul(result, result, Operand(Abs(divisor)));
+ __ Subu(result, dividend, Operand(result));
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ __ bind(&remainder_not_zero);
+ }
+}
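TruncatingDiv produces trunc(n / |d|) by multiplying with a precomputed "magic" constant (Hacker's Delight, ch. 10), and DoModByConstI then recovers the remainder as n - q * |d|, which equals n % d regardless of d's sign. A sketch for one fixed divisor, 3, whose magic multiplier is 0x55555556:

    #include <cstdint>

    int32_t TruncatingDivBy3(int32_t n) {
      int32_t q = static_cast<int32_t>(
          (static_cast<int64_t>(n) * 0x55555556LL) >> 32);
      return q + (static_cast<uint32_t>(n) >> 31);  // round toward zero
    }

    int32_t ModBy3(int32_t n) { return n - TruncatingDivBy3(n) * 3; }

For example, ModBy3(-7) gives -7 - (-2 * 3) = -1, matching -7 % 3.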
- // TODO(svenpanne) Only emit the test/deopt if we have to.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ const Register left_reg = ToRegister(instr->left());
+ const Register right_reg = ToRegister(instr->right());
+ const Register result_reg = ToRegister(instr->result());
+
+ // div runs in the background while we check for special cases.
+ __ div(left_reg, right_reg);
+
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ }
+
+ // Check for kMinInt % -1, div will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ } else {
+ __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(result_reg, zero_reg);
}
- __ bind(&done);
+ __ bind(&no_overflow_possible);
}
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ }
+ __ bind(&done);
}
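The two guards above exist because the hardware div gives no usable answer in either case: x % 0 has no representable result, and kMinInt % -1 overflows the quotient (undefined behavior in C++, unpredictable on MIPS). In C++ shape (illustrative helper, not V8 API):

    #include <cstdint>

    bool SafeMod(int32_t lhs, int32_t rhs, int32_t* result) {
      if (rhs == 0) return false;  // deopt: the JS result would be NaN
      if (lhs == INT32_MIN && rhs == -1) {
        // Mathematically 0, but a negative lhs makes the JS result -0,
        // so the code deopts here instead when kBailoutOnMinusZero is set.
        *result = 0;
        return true;
      }
      *result = lhs % rhs;
      return true;
    }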
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ And(at, dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Subu(result, zero_reg, dividend);
+ return;
+ }
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Move(result, dividend);
+ } else if (shift == 1) {
+ __ srl(result, dividend, 31);
+ __ Addu(result, dividend, Operand(result));
+ } else {
+ __ sra(result, dividend, 31);
+ __ srl(result, result, 32 - shift);
+ __ Addu(result, dividend, Operand(result));
+ }
+ if (shift > 0) __ sra(result, result, shift);
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+}
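The shift sequence rounds toward zero: a bare arithmetic shift would round toward negative infinity, so 2^shift - 1 is first added to negative dividends, materialized as the sign word shifted down logically. C++ sketch (negation for a negative divisor happens afterwards, as in the Subu above):

    #include <cstdint>

    int32_t DivByPowerOf2(int32_t n, int shift) {
      if (shift == 0) return n;
      // n >> 31 is all ones for negative n; shifting it logically by
      // 32 - shift leaves the bias 2^shift - 1 (and 0 for non-negative n).
      uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - shift);
      return static_cast<int32_t>(static_cast<uint32_t>(n) + bias) >> shift;
    }

For example, DivByPowerOf2(-9, 3) biases -9 to -2 and shifts to -1, matching trunc(-9 / 8).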
+
- uint32_t divisor_abs = abs(divisor);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
- DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
- }
- // Compute the remainder.
- __ Move(remainder, zero_reg);
- return;
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ sra(scratch, dividend, power - 1);
- }
- __ srl(scratch, scratch, 32 - power);
- __ Addu(scratch, dividend, Operand(scratch));
- __ sra(result, scratch, power);
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ Subu(result, zero_reg, Operand(result));
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sll(scratch, result, power);
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ sll(scratch, result, power);
- __ Addu(remainder, dividend, Operand(scratch));
- }
- return;
- } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ li(scratch, Operand(M));
- __ mult(dividend, scratch);
- __ mfhi(scratch);
- if (M < 0) {
- __ Addu(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ sra(scratch, scratch, s);
- __ mov(scratch, scratch);
- }
- __ srl(at, dividend, 31);
- __ Addu(result, scratch, Operand(at));
- if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
- // Compute the remainder.
- __ li(scratch, Operand(divisor));
- __ Mul(scratch, result, Operand(scratch));
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ li(scratch, Operand(divisor));
- __ div(dividend, scratch);
- __ mfhi(remainder);
- __ mflo(result);
- }
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
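+    // Verify that the division was exact: deopt unless
+    // result * divisor == dividend.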
+ __ Mul(scratch0(), result, Operand(divisor));
+ __ Subu(scratch0(), scratch0(), dividend);
+ DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
}
}
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ div(left, right);
+ __ div(dividend, divisor);
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
__ bind(&left_not_min_int);
}
- if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
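+    // div leaves the quotient in LO and the remainder in HI; deopt unless
+    // the remainder is zero, then fetch the quotient.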
__ mfhi(result);
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+ } else {
+ __ mflo(result);
}
- __ mflo(result);
}
@@ -1319,67 +1305,151 @@ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ Register scratch = result.is(dividend) ? scratch0() : dividend;
+ ASSERT(!result.is(dividend) || !scratch.is(dividend));
- if (instr->right()->IsConstantOperand()) {
- Label done;
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(divisor));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
- } else {
- Label done;
- const Register right = ToRegister(instr->right());
+ // If the divisor is 1, return the dividend.
+ if (divisor == 1) {
+ __ Move(result, dividend);
+ return;
+ }
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ div(left, right);
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sra(result, dividend, shift);
+ return;
+ }
- // Check for x / 0.
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+ // If the divisor is negative, we have to negate and handle edge cases.
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
- __ bind(&left_not_zero);
- }
+  // The dividend can be the same register as the result, so save its value
+  // here for the overflow check below.
+ __ Move(scratch, dividend);
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
+ __ Subu(result, zero_reg, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ __ Xor(scratch, scratch, result);
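+  // For a nonzero dividend, dividend ^ -dividend is negative unless the
+  // dividend is kMinInt, whose negation overflows back to itself.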
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
}
+ return;
+ }
- __ mfhi(remainder);
- __ mflo(result);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sra(result, result, shift);
+ return;
+ }
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(right));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
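+  // If the negation overflowed (dividend == kMinInt), the quotient is known
+  // at compile time.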
+ Label no_overflow, done;
+ __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+ __ li(result, Operand(kMinInt / divisor));
+ __ Branch(&done);
+ __ bind(&no_overflow);
+ __ sra(result, result, shift);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
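+  // When the signs of dividend and divisor differ, the identity
+  // floor(a / b) == trunc((a +/- 1) / b) - 1 holds, e.g.
+  // floor(-7 / 2) == trunc(-6 / 2) - 1 == -4; the slow path below uses it.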
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+ dividend, Operand(zero_reg));
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ const Register result = ToRegister(instr->result());
+
+ // On MIPS div is asynchronous - it will run in the background while we
+ // check for special cases.
+ __ div(dividend, divisor);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
}
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
+ // We performed a truncating division. Correct the result if necessary.
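+  // The remainder takes the sign of the dividend; a nonzero remainder whose
+  // sign differs from the divisor's (their xor is negative) means the exact
+  // quotient was negative, so round it down by one.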
+ Label done;
+ Register remainder = scratch0();
+ __ mfhi(remainder);
+ __ mflo(result);
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(remainder, remainder, Operand(divisor));
+ __ Branch(&done, ge, remainder, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
}
@@ -1505,7 +1575,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, at));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1627,7 +1697,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1637,9 +1707,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
+ if (right->IsStackSlot() || right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ SubuAndCheckForOverflow(ToRegister(result),
ToRegister(left),
@@ -1683,9 +1751,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
+ Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ li(ToRegister(instr->result()), value);
+ __ li(ToRegister(instr->result()), object);
}
@@ -1696,41 +1764,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ Move(result, input);
- __ JumpIfSmi(input, &done);
- }
-
- // If the object is not a value type, return the object.
- __ GetObjectType(input, map, map);
- __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
- __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1847,17 +1880,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- ASSERT(ToRegister(instr->context()).is(cp));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -1865,7 +1887,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1876,7 +1898,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
Register overflow = scratch0();
Register scratch = scratch1();
if (right->IsStackSlot() ||
- right->IsArgument() ||
right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ AdduAndCheckForOverflow(ToRegister(result),
@@ -1904,20 +1925,19 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, at));
+ Register right_reg = EmitLoadRegister(right, scratch0());
Register result_reg = ToRegister(instr->result());
Label return_right, done;
- if (!result_reg.is(left_reg)) {
- __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
- __ mov(result_reg, left_reg);
- __ Branch(&done);
+ Register scratch = scratch1();
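+    // Branch-free min/max: Slt leaves 1 in scratch iff left < right, and
+    // the conditional moves pick which operand to keep based on that flag.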
+ __ Slt(scratch, left_reg, Operand(right_reg));
+ if (condition == ge) {
+ __ Movz(result_reg, left_reg, scratch);
+ __ Movn(result_reg, right_reg, scratch);
+ } else {
+ ASSERT(condition == le);
+ __ Movn(result_reg, left_reg, scratch);
+ __ Movz(result_reg, right_reg, scratch);
}
- __ Branch(&done, condition, left_reg, right_op);
- __ bind(&return_right);
- __ Addu(result_reg, zero_reg, right_op);
- __ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
FPURegister left_reg = ToDoubleRegister(left);
@@ -1982,12 +2002,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ MultiPush(saved_regs);
__ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
+ __ MovToFloatParameters(left, right);
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
   // Move the result to the double result register.
- __ GetCFunctionDoubleResult(result);
+ __ MovFromFloatResult(result);
   // Restore saved registers.
__ MultiPop(saved_regs);
@@ -2006,8 +2026,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   // Other architectures use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -2263,7 +2283,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cond = TokenToCondition(instr->op(), false);
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op(), is_unsigned);
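+  // A comparison with a uint32 operand must be unsigned: the top bit of a
+  // uint32 value is a value bit, not a sign bit.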
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2307,8 +2330,8 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cmp_left = ToRegister(right);
cmp_right = Operand(value);
}
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
} else {
cmp_left = ToRegister(left);
cmp_right = Operand(ToRegister(right));
@@ -2429,7 +2452,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp1 = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond =
EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
@@ -2450,7 +2473,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -2517,7 +2540,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2648,8 +2671,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
Register result = ToRegister(instr->result());
ASSERT(result.is(v0));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Branch(&true_label, eq, result, Operand(zero_reg));
__ li(result, Operand(factory()->false_value()));
@@ -2706,10 +2729,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&cache_miss, ne, map, Operand(at));
+ __ BranchShort(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
- // with true or false.
+  // with true or false. The distance from the map check has to be constant.
__ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
__ Branch(&done);
@@ -2749,7 +2772,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
@@ -2769,7 +2792,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2861,10 +2884,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2893,18 +2915,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).is(a1));
- ASSERT(ToRegister(instr->value()).is(a0));
-
- __ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
@@ -2948,7 +2958,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
@@ -2999,7 +3009,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3053,15 +3063,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ lw(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -3124,10 +3125,13 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
@@ -3135,44 +3139,53 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
   } else {  // Loading doubles, not floats.
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ __ ldc1(result, MemOperand(scratch0(), base_offset));
}
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
__ lb(result, mem_operand);
break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ lbu(result, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
__ lh(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ lhu(result, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
__ lw(result, mem_operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr->environment(),
result, Operand(0x80000000));
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3180,7 +3193,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3197,15 +3210,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int base_offset =
- FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- (instr->additional_index() << element_size_shift);
+ int base_offset = instr->base_offset();
if (key_is_constant) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- base_offset += constant_key << element_size_shift;
+ base_offset += constant_key * kDoubleSize;
}
__ Addu(scratch, elements, Operand(base_offset));
@@ -3220,7 +3231,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
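+    // The hole is a NaN with a distinguished upper word, so checking the
+    // upper 32 bits of the loaded double is sufficient.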
- __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
@@ -3231,12 +3242,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
Register key = ToRegister(instr->key());
@@ -3251,9 +3261,8 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ lw(result, FieldMemOperand(store_base, offset));
+ __ lw(result, MemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3269,7 +3278,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3285,19 +3294,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ Addu(scratch0(), key, Operand(additional_index));
- }
-
+ int base_offset) {
if (key_is_constant) {
- return MemOperand(base,
- (constant_key << element_size) + additional_offset);
+ return MemOperand(base, (constant_key << element_size) + base_offset);
}
- if (additional_index == 0) {
+ if (base_offset == 0) {
if (shift_size >= 0) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), base, scratch0());
@@ -3311,14 +3313,14 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
if (shift_size >= 0) {
- __ sll(scratch0(), scratch0(), shift_size);
+ __ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
+ return MemOperand(scratch0(), base_offset);
} else {
ASSERT_EQ(-1, shift_size);
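+    // A shift_size of -1 means a smi key with one-byte elements: untag the
+    // key with an arithmetic shift right by one bit.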
- __ srl(scratch0(), scratch0(), 1);
+ __ sra(scratch0(), key, 1);
__ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
+ return MemOperand(scratch0(), base_offset);
}
}
@@ -3387,19 +3389,21 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// passed unchanged to builtins and strict-mode functions.
Label global_object, result_in_receiver;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ lw(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+ // Do not transform the receiver to object for builtins.
+ int32_t strict_mode_function_mask =
+ 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+ int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+ __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+ __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+ }
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3414,14 +3418,15 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr->environment(),
scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(&result_in_receiver);
+ __ Branch(&result_in_receiver);
__ bind(&global_object);
-
- __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ lw(result,
+ ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
__ lw(result,
- FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
+ FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -3478,8 +3483,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3517,35 +3521,13 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3553,7 +3535,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3577,7 +3558,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(t1, call_kind);
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -3587,24 +3567,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(v0));
- __ mov(a0, v0);
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- A1_UNINITIALIZED);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
ASSERT(instr->context() != NULL);
ASSERT(ToRegister(instr->context()).is(cp));
@@ -3649,7 +3616,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
@@ -3869,22 +3836,23 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f0));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(a2, &no_deopt);
__ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
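+    // Deopt unless the exponent's map is the heap number map.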
DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3905,46 +3873,18 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
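+  // Call the C math library for log: zero integer arguments, one double
+  // argument, and the result comes back in the FP result register.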
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(f4));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ mov(cp, zero_reg);
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Clz(result, input);
}
@@ -3958,79 +3898,66 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
A1_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(v0));
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
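+    // The target register holds a tagged Code object; add the header size
+    // minus the tag to reach its first instruction.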
+ __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ mov(sp, fp);
- __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ li(a0, Operand(instr->arity()));
}
-}
+ // Change context.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(v0));
+ // Load the code entry address
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ li(a2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- A1_UNINITIALIZED);
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4041,10 +3968,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ li(a0, Operand(instr->arity()));
// No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ li(a2, Operand(undefined_value));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4054,17 +3980,16 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4075,19 +4000,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ Branch(&packed_case, eq, t1, Operand(zero_reg));
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -4135,46 +4061,38 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ SmiTst(value, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
__ sdc1(value, FieldMemOperand(object, offset));
return;
}
- if (!transition.is_null()) {
+ if (instr->hydrogen()->has_transition()) {
+ Handle<Map> transition = instr->hydrogen()->transition_map();
+ AddDeprecationDependency(transition);
__ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- scratch,
- temp,
- GetRAState(),
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ scratch,
+ temp,
+ GetRAState(),
+ kSaveFPRegs);
}
}
// Do the store.
Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4187,7 +4105,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -4203,7 +4122,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ instr->hydrogen()->SmiCheckForWriteBarrier(),
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
}
@@ -4216,49 +4136,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::ApplyCheckIf(Condition condition,
- LBoundsCheck* check,
- Register src1,
- const Operand& src2) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+ Operand operand(0);
+ Register reg;
+ if (instr->index()->IsConstantOperand()) {
+ operand = ToOperand(instr->index());
+ reg = ToRegister(instr->length());
+ cc = CommuteCondition(cc);
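+    // The index and length were swapped, so the condition must be commuted.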
+ } else {
+ reg = ToRegister(instr->index());
+ operand = ToOperand(instr->length());
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
Label done;
- __ Branch(&done, NegateCondition(condition), src1, src2);
+ __ Branch(&done, NegateCondition(cc), reg, operand);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(condition, check->environment(), src1, src2);
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ li(at, Operand(Smi::FromInt(constant_index)));
- } else {
- __ li(at, Operand(constant_index));
- }
- ApplyCheckIf(condition,
- instr,
- at,
- Operand(ToRegister(instr->length())));
- } else {
- ApplyCheckIf(condition,
- instr,
- ToRegister(instr->index()),
- Operand(ToRegister(instr->length())));
+ DeoptimizeIf(cc, instr->environment(), reg, operand);
}
}
@@ -4280,10 +4181,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4298,34 +4201,44 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ Addu(address, external_pointer, address);
}
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, additional_offset));
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(address, additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ sdc1(value, MemOperand(address, base_offset));
}
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ base_offset);
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
__ sb(value, mem_operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ sh(value, mem_operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ sw(value, mem_operand);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4333,7 +4246,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4347,6 +4260,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Register scratch = scratch0();
DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
Label not_nan, done;
// Calculate the effective address of the slot in the array to store the
@@ -4358,13 +4272,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Abort(kArrayIndexConstantValueTooBig);
}
__ Addu(scratch, elements,
- Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand((constant_key << element_size_shift) + base_offset));
} else {
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ Addu(scratch, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, elements, Operand(base_offset));
__ sll(at, ToRegister(instr->key()), shift_size);
__ Addu(scratch, scratch, at);
}
@@ -4377,16 +4289,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(double_scratch,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
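+  // Store the canonical quiet NaN instead, so the hole NaN bit pattern is
+  // never written as an ordinary value.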
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ sdc1(double_scratch, MemOperand(scratch, 0));
__ Branch(&done);
}
__ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
+ __ sdc1(value, MemOperand(scratch, 0));
__ bind(&done);
}
@@ -4398,14 +4308,13 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
: no_reg;
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
// Even though the HLoadKeyed instruction forces the input
@@ -4419,30 +4328,30 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ sw(value, FieldMemOperand(store_base, offset));
+ __ sw(value, MemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ Addu(key, store_base, Operand(offset));
__ RecordWrite(elements,
key,
value,
GetRAState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ instr->hydrogen()->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4458,7 +4367,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = (instr->strict_mode() == STRICT)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4483,18 +4392,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ li(new_map_reg, Operand(to_map));
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, GetRAState(), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg,
+ new_map_reg,
+ scratch,
+ GetRAState(),
+ kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(a0));
ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
- __ mov(a0, object_reg);
__ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
}
__ bind(&not_applicable);
}
@@ -4513,18 +4426,12 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(a1));
- ASSERT(ToRegister(instr->right()).is(a0));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4575,7 +4482,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
@@ -4651,22 +4558,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- Register scratch = scratch0();
-
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4677,28 +4568,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- Register scratch = scratch0();
- __ And(scratch, ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4722,9 +4602,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4741,18 +4623,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
DoubleRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4769,37 +4652,41 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, t1);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ Branch(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, zero_reg);
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Subu(v0, v0, kHeapObjectTag);
+ __ StoreToSafepointRegisterSlot(v0, dst);
+ }
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4848,11 +4735,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
@@ -4861,8 +4748,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ And(at, input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, at);
+ DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ } else {
+ __ SmiTag(output, input);
+ }
}
@@ -4955,8 +4855,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label no_heap_number, check_bools, check_false;
- __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
- __ mov(scratch2, input_reg);
+ // Check HeapNumber map.
+ __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ __ mov(scratch2, input_reg); // In delay slot.
__ TruncateHeapNumberToI(input_reg, scratch2);
__ Branch(&done);
@@ -5143,7 +5044,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
@@ -5213,7 +5114,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
__ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
@@ -5241,7 +5142,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
@@ -5249,20 +5157,20 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
@@ -5322,6 +5230,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ FmoveHigh(result_reg, value_reg);
+ } else {
+ __ FmoveLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5411,7 +5338,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Push(Smi::FromInt(size));
+ if (size >= 0 && size <= Smi::kMaxValue) {
+ __ Push(Smi::FromInt(size));
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
}
int flags = AllocateDoubleAlignFlag::encode(
@@ -5429,7 +5362,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5463,7 +5396,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ li(t1, Operand(instr->hydrogen()->pattern()));
__ li(t0, Operand(instr->hydrogen()->flags()));
__ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(a1, v0);
__ bind(&materialized);
@@ -5476,7 +5409,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(a1);
__ bind(&allocated);
@@ -5501,16 +5434,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(instr->hydrogen()->shared_info()));
__ li(a1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5533,8 +5467,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
instr->FalseLabel(chunk_),
input,
instr->type_literal(),
- cmp1,
- cmp2);
+ &cmp1,
+ &cmp2);
ASSERT(cmp1.is_valid());
ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
@@ -5549,22 +5483,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2) {
+ Register* cmp1,
+ Operand* cmp2) {
// This function utilizes the delay slot heavily. This is used to load
// values that are always usable without depending on the type of the input
// register.
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- cmp1 = input;
- cmp2 = Operand(at);
+ *cmp1 = input;
+ *cmp2 = Operand(at);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5573,32 +5508,33 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// other branch.
__ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
- cmp1 = scratch;
- cmp2 = Operand(SYMBOL_TYPE);
+ *cmp1 = scratch;
+ *cmp2 = Operand(SYMBOL_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
- cmp1 = at;
- cmp2 = Operand(input);
+ *cmp1 = at;
+ *cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
@@ -5608,20 +5544,20 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, scratch, input);
__ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
- cmp1 = input;
- cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ *cmp1 = input;
+ *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -5637,13 +5573,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// Check for undetectable objects => false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
- cmp1 = at;
- cmp2 = Operand(zero_reg);
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg);
final_branch_condition = eq;
} else {
- cmp1 = at;
- cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
+ *cmp1 = at;
+ *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
__ Branch(false_label);
}
@@ -5680,23 +5616,24 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
}
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5733,7 +5670,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5769,11 +5706,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5782,7 +5715,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5874,13 +5806,60 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object, index);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
+ __ And(scratch, index, Operand(Smi::FromInt(1)));
+ __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+ __ sra(index, index, 1);
+
__ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
__ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
@@ -5896,10 +5875,26 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Subu(scratch, result, scratch);
__ lw(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ li(at, scope_info);
+ __ Push(at, ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/mips/lithium-codegen-mips.h
index 71cc34fb8b4..d70c871265c 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/mips/lithium-codegen-mips.h
@@ -1,40 +1,17 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#include "deoptimizer.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-mips.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
+#include "src/deoptimizer.h"
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/mips/lithium-mips.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -124,9 +101,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -138,6 +117,10 @@ class LCodeGen: public LCodeGenBase {
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -149,8 +132,7 @@ class LCodeGen: public LCodeGenBase {
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset);
+ int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -161,9 +143,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -182,8 +162,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
@@ -243,7 +221,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
A1State a1_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -260,10 +237,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void ApplyCheckIf(Condition condition,
- LBoundsCheck* check,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -272,7 +245,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -344,8 +316,8 @@ class LCodeGen: public LCodeGenBase {
Label* false_label,
Register input,
Handle<String> type_name,
- Register& cmp1,
- Operand& cmp2);
+ Register* cmp1,
+ Operand* cmp2);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -424,12 +396,20 @@ class LCodeGen: public LCodeGenBase {
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
@@ -439,12 +419,20 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub1(codegen_->masm_->isolate(),
+ kDontSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub1);
break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub2(codegen_->masm_->isolate(),
+ kSaveFPRegs);
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub2);
break;
+ }
default:
UNREACHABLE();
}
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
index 3ee74866c75..6447520c1b3 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-codegen-mips.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.h b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
index ea1ea3cbbf2..0072e526cb1 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/mips/lithium-mips.cc
index 0358feeef55..830fc9152df 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/mips/lithium-mips.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
-#include "hydrogen-osr.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/lithium-allocator-inl.h"
+#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-codegen-mips.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -261,7 +238,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -282,7 +259,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -307,28 +295,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[a2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -365,7 +331,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -376,7 +342,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -388,7 +354,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -462,7 +428,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -568,8 +534,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -577,40 +542,35 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
+ LTemplateResultInstruction<1>* instr, int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
+ LTemplateResultInstruction<1>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -645,6 +605,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -671,6 +633,19 @@ LUnallocated* LChunkBuilder::TempRegister() {
}
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
ASSERT(operand->HasFixedPolicy());
@@ -856,176 +831,108 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
}
-
- return result;
}
@@ -1035,22 +942,21 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
ToBooleanStub::Types expected = instr->expected_input_types();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1116,9 +1022,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
}
@@ -1157,33 +1067,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1201,12 +1116,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1215,30 +1128,17 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
}
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
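
[Editor's note: kMathClz32 is the one genuinely new case in this switch. Math.clz32(x) counts the leading zero bits of x viewed as a uint32, which MIPS has a dedicated clz instruction for, so the lowering above needs neither a runtime call nor an environment. A portable sketch of the semantics; the builtin is a GCC/Clang assumption:]

#include <cassert>
#include <cstdint>

// Math.clz32 semantics: leading zeros of the 32-bit value, 32 for 0.
int Clz32(uint32_t x) {
  return x == 0 ? 32 : __builtin_clz(x);  // builtin is undefined for 0
}

int main() {
  assert(Clz32(0u) == 32);
  assert(Clz32(1u) == 31);
  assert(Clz32(0x80000000u) == 0);
}
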
@@ -1248,7 +1148,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LOperand* double_temp = TempDoubleRegister();
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
}
@@ -1257,7 +1157,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
// Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = FixedTemp(f6);
+ LOperand* temp = TempDoubleRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineFixedDouble(result, f4);
}
@@ -1269,8 +1169,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
? NULL
: UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1291,38 +1194,12 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(f6);
+ LOperand* temp = TempDoubleRegister();
LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(
- DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1343,9 +1220,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, v0);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, v0), instr);
}
@@ -1390,14 +1265,70 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
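
[Editor's note on the three bailout conditions above, a sketch rather than patch content: truncating division by 2^k compiles to an arithmetic shift plus a bias for negative dividends, and the deopts cover exactly what the shift cannot express: a -0 result with a negative divisor, kMinInt / -1 overflow, and a lost remainder when the uses are not truncating. The shift-with-bias itself, assuming arithmetic right shift as on MIPS:]

#include <cassert>
#include <cstdint>

// Truncating x / 2^shift: add (2^shift - 1) before shifting when x is
// negative so the quotient rounds toward zero, matching C++ division.
int32_t DivByPowerOf2(int32_t x, int shift) {
  int32_t bias = (x >> 31) & ((1 << shift) - 1);  // 2^shift - 1 if x < 0
  return (x + bias) >> shift;
}

int main() {
  assert(DivByPowerOf2(7, 1) == 3);
  assert(DivByPowerOf2(-7, 1) == -3);  // truncates toward zero, like 7/2
}
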
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LDivI* div = new(zone()) LDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1406,86 +1337,110 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
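
[Editor's note: the deleted HasMagicNumberForDivisor above pointed at Hacker's Delight. The technique the new LDivByConstI/LFlooringDivByConstI paths rely on is replacing division by a known constant with a multiply-high and a shift using a precomputed "magic" reciprocal. A simplified unsigned sketch; the actual lowering emits the signed variant, and unsigned __int128 is a GCC/Clang assumption:]

#include <cassert>
#include <cstdint>

// Divide by a constant d without a divide instruction:
// q = (x * M) >> (32 + s), with M = ceil(2^(32+s) / d), s = ceil(log2 d).
// M is computed here at runtime for illustration; a compiler bakes the
// multiplier and shift for each constant divisor into the generated code.
uint32_t DivByConst(uint32_t x, uint32_t d) {
  assert(d != 0);
  int s = 0;
  while ((1ull << s) < d) ++s;  // s = ceil(log2(d))
  unsigned __int128 M = (((unsigned __int128)1 << (32 + s)) + d - 1) / d;
  return (uint32_t)(((unsigned __int128)x * M) >> (32 + s));
}

int main() {
  assert(DivByConst(100u, 7u) == 14u);
  assert(DivByConst(0xFFFFFFFFu, 3u) == 0x55555555u);
}
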
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- // Only optimize when we have magic numbers for the divisor.
- // The standard integer division routine is usually slower than transitioning
- // to FPU.
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- return NULL;
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
}
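
[Editor's note on why MathFloorOfDiv is lowered separately at all: machine and C++ integer division truncate toward zero, while Math.floor(a / b) rounds toward negative infinity, and the two differ exactly when the operands have opposite signs and a remainder exists. A sketch of the fixup the flooring paths must encode:]

#include <cassert>
#include <cstdint>

// Flooring quotient from a truncating one: step down by 1 whenever the
// signs differ and the division was inexact.
int32_t FlooringDiv(int32_t a, int32_t b) {
  int32_t q = a / b;
  if ((a % b) != 0 && ((a < 0) != (b < 0))) --q;
  return q;
}

int main() {
  assert(-7 / 2 == -3);              // truncating (machine) quotient
  assert(FlooringDiv(-7, 2) == -4);  // what Math.floor(-7 / 2) expects
  assert(FlooringDiv(7, 2) == 3);    // same as truncating for same signs
}
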
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
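
[Editor's note on DefineSameAsFirst in the power-of-two case and the -0 bailouts above: x mod 2^k is a mask of the low bits, but JavaScript's remainder takes the dividend's sign, so a negative dividend needs its magnitude masked and negated, and a zero result from a negative dividend is the -0 that forces a deopt. A sketch, assuming x != INT32_MIN:]

#include <cassert>
#include <cstdint>

// JS-style remainder for divisor 2^k: the sign follows the dividend.
// Assumes x != INT32_MIN (negating it would overflow).
int32_t ModByPowerOf2(int32_t x, int k) {
  int32_t mask = (1 << k) - 1;
  return x >= 0 ? (x & mask) : -((-x) & mask);
}

int main() {
  assert(ModByPowerOf2(7, 2) == 3);
  assert(ModByPowerOf2(-7, 2) == -3);
  assert(ModByPowerOf2(-4, 2) == 0);  // -0 in JS: the bailout case
}
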
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1)) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- }
+ return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
} else {
@@ -1694,8 +1649,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1711,8 +1664,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
LOperand* scratch = TempRegister();
return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
@@ -1801,19 +1752,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LDateField* result =
@@ -1841,9 +1779,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
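
[Editor's note: the interplay of the two conditions above is easy to misread. A skippable check is dropped entirely unless --debug-code is set, and when it is kept under --debug-code it deliberately gets no environment, because it only aborts in debug builds rather than deoptimizing. The same logic as a truth table:]

#include <cassert>

// Mirrors DoBoundsCheck: 0 = check elided, 1 = check without an
// environment (debug-only abort), 2 = check that can deoptimize.
int BoundsCheckKind(bool debug_code, bool skip_check) {
  if (!debug_code && skip_check) return 0;        // elided entirely
  if (!debug_code || !skip_check) return 2;       // deoptimizing check
  return 1;                                       // debug-only check
}

int main() {
  assert(BoundsCheckKind(false, true) == 0);
  assert(BoundsCheckKind(true, true) == 1);
  assert(BoundsCheckKind(false, false) == 2);
  assert(BoundsCheckKind(true, false) == 2);
}
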
@@ -1861,13 +1806,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1884,20 +1822,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1905,78 +1844,71 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = NULL;
- LInstruction* res = NULL;
- HValue* val = instr->value();
if (val->type().IsSmi() || val->representation().IsSmi()) {
- value = UseRegisterAtStart(val);
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(val);
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = FixedTemp(f22);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2));
- res = AssignEnvironment(res);
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
- return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
+ return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LDoubleToI* res = new(zone()) LDoubleToI(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
}
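
[Editor's note as background for the kCanOverflow branches above: on 32-bit MIPS a smi is a 31-bit payload shifted left one bit, so tagging an int32 succeeds only when the value fits in 31 bits; values outside that range take the LNumberTagI/LNumberTagU paths, which can allocate a heap number and therefore need a pointer map. A minimal sketch of the tagging check:]

#include <cassert>
#include <cstdint>

// 32-bit smi tagging: payload in the upper 31 bits, tag bit 0 is clear.
bool TrySmiTag(int32_t value, int32_t* smi) {
  if (value < -(1 << 30) || value >= (1 << 30)) return false;  // overflow
  *smi = (int32_t)((uint32_t)value << 1);
  return true;
}

int main() {
  int32_t smi;
  assert(TrySmiTag(42, &smi) && smi == 84);
  assert(!TrySmiTag(1 << 30, &smi));  // needs LNumberTagI instead
}
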
@@ -1987,7 +1919,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2011,15 +1947,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2031,19 +1964,33 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
// Revisit this decision, here and 8 lines below.
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+ TempDoubleRegister()));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsSmiOrTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve f22 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
+ LClampTToUint8* result =
+ new(zone()) LClampTToUint8(reg, TempDoubleRegister());
return AssignEnvironment(DefineAsRegister(result));
}
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2100,21 +2047,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), a1);
- LOperand* value = UseFixed(instr->value(), a0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2129,7 +2069,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2160,20 +2103,13 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2181,25 +2117,28 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
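
[Editor's note: the EXTERNAL_UINT32_ELEMENTS/UINT32_ELEMENTS special case above exists because a uint32 load can exceed the int32 range. When the result representation is int32 rather than uint32, any element above 0x7FFFFFFF cannot be represented and the load must deoptimize, which is why those kinds get an environment. A sketch of the failing case:]

#include <cassert>
#include <cstdint>

// A uint32 element only fits the int32 representation if its top bit
// is clear; otherwise the lowered load has to bail out to a deopt.
bool FitsInt32Representation(uint32_t element) {
  return element <= 0x7FFFFFFFu;
}

int main() {
  assert(FitsInt32Representation(0x7FFFFFFFu));
  assert(!FitsInt32Representation(0x80000000u));  // triggers the deopt path
}
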
@@ -2215,7 +2154,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2244,17 +2183,17 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
- (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
- (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
-
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2275,17 +2214,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), a0);
LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2318,11 +2258,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2331,14 +2269,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2354,12 +2285,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), a1)
- : UseRegisterAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), a0)
- : UseRegisterAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
instr);
@@ -2372,7 +2299,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2428,7 +2355,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2501,9 +2428,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2535,13 +2459,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2595,9 +2519,27 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
}
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/mips/lithium-mips.h
index 8d34399057b..ea3a658f3c3 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/mips/lithium-mips.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_LITHIUM_MIPS_H_
#define V8_MIPS_LITHIUM_MIPS_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -83,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -109,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -119,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -131,17 +108,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
V(MathExp) \
+ V(MathClz32) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -150,7 +126,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -164,8 +139,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -177,16 +152,13 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -266,7 +238,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -303,10 +277,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -318,10 +290,20 @@ class LTemplateInstruction : public LInstruction {
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
@@ -440,6 +422,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -490,10 +473,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -558,6 +537,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
@@ -622,72 +602,159 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
public:
- // Used when the right hand is a constant power of 2.
LModI(LOperand* left,
LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
@@ -811,39 +878,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathSin(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1318,34 +1361,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
@@ -1401,20 +1416,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1584,20 +1585,6 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
@@ -1613,12 +1600,18 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -1678,28 +1671,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1761,15 +1732,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1808,18 +1779,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1833,95 +1792,73 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
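
The LCallWithDescriptor class introduced above is the structurally interesting change: most lithium instructions fix their operand count through the LTemplateInstruction<R, I, T> template parameters, but here the arity depends on the call descriptor, so the instruction keeps its operands in a growable ZoneList and overrides the iterator hooks (InputCount/InputAt) itself. A shape-only sketch of the two storage strategies, using simplified stand-in types rather than V8's real classes:

#include <vector>

struct LOperandStub {};  // stand-in for LOperand

// Fixed arity (LTemplateInstruction<1, 2, 0>): operand storage is an
// embedded array whose size is known at compile time.
struct FixedArityInstr {
  LOperandStub* inputs[2];
  int InputCount() const { return 2; }
  LOperandStub* InputAt(int i) const { return inputs[i]; }
};

// Variable arity (LCallWithDescriptor): operand storage is sized at
// graph-build time, so the instruction supplies the iterator hooks itself.
struct VariableArityInstr {
  std::vector<LOperandStub*> inputs;
  int InputCount() const { return static_cast<int>(inputs.size()); }
  LOperandStub* InputAt(int i) const { return inputs[i]; }
};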
@@ -1943,35 +1880,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -2021,7 +1929,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2043,19 +1951,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -2068,38 +1963,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2184,6 +2074,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2234,7 +2125,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2259,7 +2149,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2272,6 +2162,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2284,7 +2180,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -2310,7 +2206,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2437,7 +2333,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2513,6 +2409,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
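
LDoubleBits and LConstructDouble model taking a double apart into its raw 32-bit halves and putting one back together. What they compute, expressed as portable C++ (a sketch, not V8 code; the generated code moves FPU register halves instead):

#include <cstdint>
#include <cstring>

// High 32 bits of the IEEE-754 representation (one of the two words
// LDoubleBits can extract).
uint32_t DoubleHighBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

// Reassemble a double from its two halves (what LConstructDouble does).
double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}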
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2705,6 +2628,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
@@ -2716,26 +2668,24 @@ class LPlatformChunk V8_FINAL : public LChunk {
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition) { }
+ allocator_(allocator) { }
+
+ Isolate* isolate() const { return graph_->isolate(); }
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2744,18 +2694,24 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2768,7 +2724,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2818,31 +2773,26 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
@@ -2856,11 +2806,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2873,14 +2820,11 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
- int position_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index f33e6fa063c..45ba4a994e8 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -79,6 +56,11 @@ void MacroAssembler::Store(Register src,
} else if (r.IsInteger16() || r.IsUInteger16()) {
sh(src, dst);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
sw(src, dst);
}
}
@@ -212,7 +194,8 @@ void MacroAssembler::RecordWriteField(
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
@@ -242,7 +225,8 @@ void MacroAssembler::RecordWriteField(
ra_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
bind(&done);
@@ -255,16 +239,93 @@ void MacroAssembler::RecordWriteField(
}
+// Will clobber 4 registers: object, map, dst, at. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ ASSERT(!dst.is(at));
+ lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ dst,
+ Operand(isolate()->factory()->meta_map()));
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ if (emit_debug_code()) {
+ lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ map,
+ Operand(at));
+ }
+
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since the
+ // flag is only set during incremental collection, in which case the from
+ // object's page's interesting flag is guaranteed to be set as well. This
+ // optimization relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, at, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
+ li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
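
The CheckPageFlag fast path used above relies on page-aligned MemoryChunk headers: masking the low bits of any heap pointer yields the start of its page, where the flags word lives. A minimal sketch of that masking trick, with an assumed page size (the real constants live in MemoryChunk):

#include <cstdint>

constexpr uintptr_t kAssumedPageSize = uintptr_t{1} << 20;  // illustrative only

// Any object address, masked, gives the address of its page header.
uintptr_t PageStartOf(uintptr_t object_address) {
  return object_address & ~(kAssumedPageSize - 1);
}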
// Will clobber 4 registers: object, address, scratch, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
@@ -274,6 +335,11 @@ void MacroAssembler::RecordWrite(Register object,
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
// TODO(mstarzinger): Dynamic counter missing.
@@ -287,11 +353,13 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -302,7 +370,8 @@ void MacroAssembler::RecordWrite(Register object,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
@@ -352,7 +421,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -789,7 +858,28 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
-//------------Pseudo-instructions-------------
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+ if (kArchVariant == kLoongson) {
+ lw(zero_reg, rs);
+ } else {
+ pref(hint, rs);
+ }
+}
+
+
+// ------------Pseudo-instructions-------------
+
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+ lwr(rd, rs);
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+ swr(rd, rs);
+ swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
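
Ulw and Usw wrap the MIPS lwr/lwl and swr/swl pairs, which together access a 32-bit word at an address carrying no alignment guarantee. Their net effect in portable C++ (a sketch; the hardware performs two partial memory accesses):

#include <cstdint>
#include <cstring>

// Equivalent of Ulw: load a word from a possibly unaligned address.
uint32_t UnalignedLoad32(const void* p) {
  uint32_t word;
  std::memcpy(&word, p, sizeof(word));
  return word;
}

// Equivalent of Usw: store a word to a possibly unaligned address.
void UnalignedStore32(void* p, uint32_t word) {
  std::memcpy(p, &word, sizeof(word));
}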
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
AllowDeferredHandleDereference smi_check;
@@ -1195,7 +1285,7 @@ void MacroAssembler::BranchF(Label* target,
break;
default:
CHECK(0);
- };
+ }
}
if (bd == PROTECT) {
@@ -1207,12 +1297,12 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
+ if (value_rep == zero && !force_load) {
mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
+ } else if (value_rep == minus_zero && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
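
The comparisons above go through DoubleRepresentation (bit patterns) rather than double values because -0.0 == 0.0 as doubles; only the representation tells them apart. A one-function illustration in plain C++ (not V8's DoubleRepresentation):

#include <cstdint>
#include <cstring>

bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits == 0x8000000000000000ull;  // sign bit set, everything else zero
}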
@@ -1435,7 +1525,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
sdc1(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(sp, result, 0, true, true);
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
CallStub(&stub);
Addu(sp, sp, Operand(kDoubleSize));
@@ -1456,7 +1546,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
- DoubleToIStub stub(object,
+ DoubleToIStub stub(isolate(),
+ object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true,
@@ -1542,19 +1633,27 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
}
} else {
if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
@@ -2015,7 +2114,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
@@ -2062,7 +2161,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Uless_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
@@ -2657,18 +2756,14 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
void MacroAssembler::DebugBreak() {
PrepareCEntryArgs(0);
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -2823,7 +2918,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3113,33 +3208,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- li(scratch1, Operand(high_promotion_mode));
- lw(scratch1, MemOperand(scratch1, 0));
- Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
-
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3284,13 +3358,24 @@ void MacroAssembler::CopyBytes(Register src,
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
+ if (kArchEndian == kLittle) {
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ } else {
+ sb(scratch, MemOperand(dst, 3));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 0));
+ }
+
Addu(dst, dst, 4);
Subu(length, length, Operand(kPointerSize));
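
The branch added above stores the word one byte at a time so that memory ends up identical to what a single aligned sw would have produced on either endianness. The same logic in portable C++ (sketch):

#include <cstdint>

void StoreWordBytewise(uint8_t* dst, uint32_t w, bool little_endian) {
  for (int i = 0; i < 4; ++i) {
    // Byte i of the value lands at offset i on little-endian MIPS and at
    // offset 3 - i on big-endian MIPS.
    int offset = little_endian ? i : 3 - i;
    dst[offset] = static_cast<uint8_t>(w >> (8 * i));
  }
}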
@@ -3395,11 +3480,12 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ sw(mantissa_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanLower32Offset));
+ sw(exponent_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanUpper32Offset));
jmp(&done);
bind(&maybe_nan);
@@ -3410,10 +3496,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
bind(&is_nan);
// Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ LoadRoot(at, Heap::kNanValueRootIndex);
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
@@ -3496,64 +3581,77 @@ void MacroAssembler::CheckMap(Register obj,
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
+ if (kArchEndian == kLittle) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, v1, v0);
+ }
} else {
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
+void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ if (kArchEndian == kLittle) {
+ Move(dst, a0, a1);
+ } else {
+ Move(dst, a1, a0);
+ }
} else {
- Move(a0, a1, dreg);
+ Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- if (dreg2.is(f12)) {
- ASSERT(!dreg1.is(f14));
- Move(f14, dreg2);
- Move(f12, dreg1);
+ Move(f12, src);
+ } else {
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src);
} else {
- Move(f12, dreg1);
- Move(f14, dreg2);
+ Move(a1, a0, src);
}
- } else {
- Move(a0, a1, dreg1);
- Move(a2, a3, dreg2);
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- Move(a2, reg);
+ Move(f0, src);
} else {
- Move(a2, reg);
- Move(a0, a1, dreg);
+ if (kArchEndian == kLittle) {
+ Move(v0, v1, src);
+ } else {
+ Move(v1, v0, src);
+ }
}
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be t1 to
- // follow the calling convention which requires the call type to be
- // in t1.
- ASSERT(dst.is(t1));
- if (call_kind == CALL_AS_FUNCTION) {
- li(dst, Operand(Smi::FromInt(1)));
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (!IsMipsSoftFloatABI) {
+ if (src2.is(f12)) {
+ ASSERT(!src1.is(f14));
+ Move(f14, src2);
+ Move(f12, src1);
+ } else {
+ Move(f12, src1);
+ Move(f14, src2);
+ }
} else {
- li(dst, Operand(Smi::FromInt(0)));
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ } else {
+ Move(a1, a0, src1);
+ Move(a3, a2, src2);
+ }
}
}
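
All of these Mov* helpers encode the o32 soft-float convention: a double travels in a pair of 32-bit GPRs, and which register of the pair carries the low word flips with endianness. Modeled in C++, with the register mapping assumed for illustration:

#include <cstdint>
#include <cstring>

// Split a double into the (first, second) registers of an o32 pair, e.g.
// (v0, v1) for a result or (a0, a1) for the first parameter.
void SplitDoubleForSoftFloat(double d, bool little_endian,
                             uint32_t* first_reg, uint32_t* second_reg) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  *first_reg = little_endian ? lo : hi;  // low word rides first on little-endian
  *second_reg = little_endian ? hi : lo;
}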
@@ -3568,8 +3666,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -3579,7 +3676,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // a3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
@@ -3623,14 +3719,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(t1, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
Branch(done);
}
} else {
- SetCallKind(t1, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -3642,8 +3736,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3652,16 +3745,14 @@ void MacroAssembler::InvokeCode(Register code,
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3671,41 +3762,10 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3723,7 +3783,7 @@ void MacroAssembler::InvokeFunction(Register function,
lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
@@ -3731,8 +3791,7 @@ void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3746,7 +3805,7 @@ void MacroAssembler::InvokeFunction(Register function,
// allow recompilation to take effect without changing any of the
// call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(a3, expected, actual, flag, call_wrapper);
}
@@ -3754,10 +3813,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
li(a1, function);
- InvokeFunction(a1, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
@@ -3879,13 +3937,17 @@ void MacroAssembler::CallStub(CodeStub* stub,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
cond, r1, r2, bd);
}
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+void MacroAssembler::TailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
@@ -3895,10 +3957,8 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(
- ExternalReference function,
- Address function_address,
+ Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
@@ -3912,6 +3972,22 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
+ lb(t9, MemOperand(t9, 0));
+ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(t9, function_address);
+ bind(&end_profiler_check);
+
// Allocate HandleScope in callee-save registers.
li(s3, Operand(next_address));
lw(s0, MemOperand(s3, kNextOffset));
@@ -3929,29 +4005,10 @@ void MacroAssembler::CallApiFunctionAndReturn(
PopSafepointRegisters();
}
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
- lb(t9, MemOperand(t9, 0));
- beq(t9, zero_reg, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
- li(t9, Operand(thunk_ref));
- jmp(&end_profiler_check);
-
- bind(&profiler_disabled);
- li(t9, Operand(function));
-
- bind(&end_profiler_check);
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(this, t9);
if (FLAG_log_timer_events) {
@@ -4004,7 +4061,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -4028,27 +4085,14 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addiu(sp, sp, num_arguments * kPointerSize);
- }
- LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash,
- Register index) {
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
// If the hash field contains an array index, pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it do not
// conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- sll(index, hash, kSmiTagSize);
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
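
DecodeFieldToSmi<Field> collapses the old Ext-then-sll pair: extract a bitfield, then retag the result as a smi (a left shift by the one-bit tag size; the tag itself is zero). What it computes, with the field's shift and width assumed for illustration:

#include <cstdint>

constexpr int kFieldShift = 2;  // assumed; real values come from the BitField
constexpr int kFieldBits = 24;  // assumed

int32_t DecodeFieldToSmi(uint32_t hash) {
  uint32_t field = (hash >> kFieldShift) & ((1u << kFieldBits) - 1);
  return static_cast<int32_t>(field << 1);  // kSmiTagSize == 1, kSmiTag == 0
}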
@@ -4190,10 +4234,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4201,7 +4242,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1, save_doubles);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
@@ -4212,7 +4253,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4241,8 +4282,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()),
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
zero_reg,
@@ -4260,12 +4301,10 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
- SetCallKind(t1, CALL_AS_METHOD);
Call(t9);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, CALL_AS_METHOD);
Jump(t9);
}
}
@@ -4366,16 +4405,8 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4387,18 +4418,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4406,8 +4435,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -4460,31 +4489,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function,
@@ -4497,19 +4501,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- lw(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4526,36 +4517,37 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
+void MacroAssembler::StubPrologue() {
Push(ra, fp, cp);
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Load the stub address to t9 and call it,
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ nop(); // Prevent jalr to jal optimization.
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
} else {
- PredictableCodeSizeScope predictible_code_size_scope(
- this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- nop(Assembler::CODE_AGE_MARKER_NOP);
- // Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
- li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
- CONSTANT_SIZE);
- nop(); // Prevent jalr to jal optimization.
- jalr(t9, a0);
- nop(); // Branch delay slot nop.
- nop(); // Pad the empty space.
- } else {
- Push(ra, fp, cp, a1);
- nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- }
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
@@ -4885,6 +4877,23 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
ASSERT(!reg.is(at));
@@ -5075,14 +5084,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
SmiTst(string, at);
- ThrowIf(eq, kNonObject, at, Operand(zero_reg));
+ Check(ne, kNonObject, at, Operand(zero_reg));
lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
andi(at, at, kStringRepresentationMask | kStringEncodingMask);
li(scratch, Operand(encoding_mask));
- ThrowIf(ne, kUnexpectedStringType, at, Operand(scratch));
+ Check(eq, kUnexpectedStringType, at, Operand(scratch));
// The index is assumed to be untagged coming in; tag it to compare with the
// string length without using a temp register. It is restored at the end of
@@ -5091,14 +5100,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
TrySmiTag(index, scratch, &index_tag_bad);
Branch(&index_tag_ok);
bind(&index_tag_bad);
- Throw(kIndexIsTooLarge);
+ Abort(kIndexIsTooLarge);
bind(&index_tag_ok);
lw(at, FieldMemOperand(string, String::kLengthOffset));
- ThrowIf(ge, kIndexIsTooLarge, index, Operand(at));
+ Check(lt, kIndexIsTooLarge, index, Operand(at));
ASSERT(Smi::FromInt(0) == 0);
- ThrowIf(lt, kIndexIsNegative, index, Operand(zero_reg));
+ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
SmiUntag(index, index);
}
@@ -5293,7 +5302,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
if (map->CanBeDeprecated()) {
li(scratch, Operand(map));
lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
- And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ And(scratch, scratch, Operand(Map::Deprecated::kMask));
Branch(if_deprecated, ne, scratch, Operand(zero_reg));
}
}
@@ -5484,57 +5493,6 @@ void MacroAssembler::EnsureNotWhite(
}
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- li(a0, Operand(Smi::FromInt(reason)));
- push(a0);
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // will not return here
- if (is_trampoline_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the ThrowMessage macro constant.
- // Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedThrowMessageInstructions = 14;
- int throw_instructions = InstructionsGeneratedSince(&throw_start);
- ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
- while (throw_instructions++ < kExpectedThrowMessageInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc,
- BailoutReason reason,
- Register rs,
- Operand rt) {
- Label L;
- Branch(&L, NegateCondition(cc), rs, rt);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -5550,7 +5508,8 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+ And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
@@ -5579,11 +5538,17 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&start);
- // Check that there are no elements. Register r2 contains the current JS
+ // Check that there are no elements. Register a2 contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+ Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+ // Second chance, the object may be using the empty slow element dictionary.
+ LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+ Branch(call_runtime, ne, a2, Operand(at));
+
+ bind(&no_elements);
lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
Branch(&next, ne, a2, Operand(null_value));
}
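
The change gives the enum-cache walk a second acceptable "no elements" state: besides the empty fixed array, an object may point at the shared empty slow-element dictionary. The shape of the check in plain C++, with stand-in types (not V8's object model):

struct FakeObject {
  const void* elements;
  const FakeObject* prototype;  // nullptr terminates the chain
};

bool PrototypeChainHasNoElements(const FakeObject* receiver,
                                 const void* empty_fixed_array,
                                 const void* empty_slow_dictionary) {
  for (const FakeObject* o = receiver; o != nullptr; o = o->prototype) {
    if (o->elements != empty_fixed_array &&
        o->elements != empty_slow_dictionary) {
      return false;  // real elements somewhere: fall back to the runtime
    }
  }
  return true;
}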
@@ -5697,7 +5662,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(scratch1);
Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
Branch(&loop_again, ne, current, Operand(factory->null_value()));
@@ -5715,10 +5680,13 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
}
-CodePatcher::CodePatcher(byte* address, int instructions)
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -5728,7 +5696,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CPU::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
@@ -5768,6 +5738,28 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(at));
+ ASSERT(!result.is(at));
+ MultiplierAndShift ms(divisor);
+ li(at, Operand(ms.multiplier()));
+ Mult(dividend, Operand(at));
+ mfhi(result);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) sra(result, result, ms.shift());
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
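
The sequence above is the classic multiply-high trick for division by a constant (Hacker's Delight, ch. 10). Below is a host-side model of the emitted arithmetic, with an illustrative multiplier/shift pair for divisor 7; the constants are assumptions about what MultiplierAndShift computes, not taken from this patch, and arithmetic right shift of negatives is assumed, as on the MIPS target:

    #include <cassert>
    #include <cstdint>

    static int32_t TruncatingDivModel(int32_t dividend, int32_t multiplier,
                                      int shift, int32_t divisor) {
      // Mult + mfhi: the high 32 bits of the 64-bit signed product.
      int32_t result = static_cast<int32_t>(
          (static_cast<int64_t>(dividend) * multiplier) >> 32);
      // The multiplier wraps for some divisors; correct with +/- dividend.
      if (divisor > 0 && multiplier < 0) result += dividend;
      if (divisor < 0 && multiplier > 0) result -= dividend;
      if (shift > 0) result >>= shift;  // sra
      // srl 31 + Addu: add the sign bit so negatives round toward zero.
      return result +
             static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }

    int main() {
      const int32_t m = static_cast<int32_t>(0x92492493);  // assumed pair for 7
      assert(TruncatingDivModel(22, m, 2, 7) == 3);
      assert(TruncatingDivModel(-22, m, 2, 7) == -3);  // truncates toward zero
    }
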
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index 4e30c353e2c..d339a3f7a2d 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#include "assembler.h"
-#include "mips/assembler-mips.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/mips/assembler-mips.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -94,6 +71,10 @@ enum LiFlags {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1,
@@ -169,6 +150,7 @@ class MacroAssembler: public Assembler {
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+ DECLARE_BRANCH_PROTOTYPES(BranchShort)
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
@@ -387,7 +369,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@@ -399,7 +383,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@@ -407,9 +393,17 @@ class MacroAssembler: public Assembler {
ra_status,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp);
+
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@@ -420,7 +414,9 @@ class MacroAssembler: public Assembler {
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// ---------------------------------------------------------------------------
@@ -601,12 +597,17 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+ void Pref(int32_t hint, const MemOperand& rs);
+
// ---------------------------------------------------------------------------
// Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ void Ulw(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -751,7 +752,7 @@ class MacroAssembler: public Assembler {
FPURegister cmp1,
FPURegister cmp2) {
BranchF(target, nan, cc, cmp1, cmp2, bd);
- };
+ }
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
@@ -865,14 +866,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -889,47 +883,31 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Set up call kind marking in t1. The method takes t1 as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void IsObjectJSObjectType(Register heap_object,
@@ -949,13 +927,10 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
// Debugger Support.
void DebugBreak();
-#endif
-
// -------------------------------------------------------------------------
// Exception handling.
@@ -974,12 +949,6 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -1091,10 +1060,6 @@ class MacroAssembler: public Assembler {
Handle<Code> success,
SmiCheckType smi_check_type);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
@@ -1189,16 +1154,18 @@ class MacroAssembler: public Assembler {
li(s2, Operand(ref));
}
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
// Call a code stub.
void CallStub(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = cc_always,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
+ COND_ARGS);
// Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
+ void TailCallStub(CodeStub* stub, COND_ARGS);
+
+#undef COND_ARGS
void CallJSExitStub(CodeStub* stub);
@@ -1270,24 +1237,23 @@ class MacroAssembler: public Assembler {
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
+ void CallApiFunctionAndReturn(Register function_address,
ExternalReference thunk_ref,
- Register thunk_last_arg,
int stack_space,
MemOperand return_value_operand,
MemOperand* context_restore_operand);
@@ -1320,6 +1286,10 @@ class MacroAssembler: public Assembler {
return code_object_;
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and at gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1444,6 +1414,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1522,15 +1496,40 @@ class MacroAssembler: public Assembler {
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
+ void DecodeField(Register dst, Register src) {
+ Ext(dst, src, Field::kShift, Field::kSize);
+ }
+
+ template<typename Field>
void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- srl(reg, reg, shift);
- And(reg, reg, Operand(mask));
+ static const int mask = Field::kMask >> shift << kSmiTagSize;
+ STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (shift < kSmiTagSize) {
+ sll(dst, src, kSmiTagSize - shift);
+ And(dst, dst, Operand(mask));
+ } else if (shift > kSmiTagSize) {
+ srl(dst, src, shift - kSmiTagSize);
+ And(dst, dst, Operand(mask));
+ } else {
+ And(dst, src, Operand(mask));
+ }
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
}
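
The shift arithmetic in DecodeFieldToSmi folds field extraction and Smi tagging into one net shift of (kShift - kSmiTagSize). A standalone model of the same computation, assuming the 32-bit V8 Smi layout with kSmiTagSize == 1 (a sketch, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Decode the bit field at [kShift, kShift + kSize) of |word| and return
    // it already Smi-tagged, i.e. shifted left by the one tag bit.
    template <int kShift, int kSize>
    static uint32_t DecodeFieldToSmiModel(uint32_t word) {
      constexpr int kSmiTagSize = 1;  // assumption: 32-bit Smis
      constexpr uint32_t field_mask = ((1u << kSize) - 1u) << kShift;
      constexpr uint32_t mask = (field_mask >> kShift) << kSmiTagSize;
      if constexpr (kShift < kSmiTagSize) {
        return (word << (kSmiTagSize - kShift)) & mask;
      } else if constexpr (kShift > kSmiTagSize) {
        return (word >> (kShift - kSmiTagSize)) & mask;
      } else {
        return word & mask;
      }
    }

    int main() {
      // Field at bits [3, 7) holding 11: the Smi encoding is 11 << 1 == 22.
      assert((DecodeFieldToSmiModel<3, 4>(11u << 3)) == 22u);
    }
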
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1580,14 +1579,6 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
@@ -1608,8 +1599,7 @@ class MacroAssembler: public Assembler {
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
@@ -1661,7 +1651,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1681,6 +1678,7 @@ class CodePatcher {
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
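
One way the new flag might be used (a hypothetical call site, not from this patch): batch several patches to one region and pay for a single I-cache flush at the end.

    {
      CodePatcher patcher(branch_address, 1, CodePatcher::DONT_FLUSH);
      patcher.ChangeBranchCondition(ne);  // rewrite in place, no flush yet
    }  // ~CodePatcher skips CPU::FlushICache because of DONT_FLUSH
    // ... further patches to the same region ...
    CPU::FlushICache(branch_address, Assembler::kInstrSize);
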
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
index 49dec3c0246..bbd5e128e5c 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1,41 +1,18 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "mips/regexp-macro-assembler-mips.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/code-stubs.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/mips/regexp-macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -1096,7 +1073,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
__ li(t9, Operand(stack_guard_check));
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(masm_, t9);
// DirectCEntryStub allocated space for the C argument slots so we have to
@@ -1127,7 +1104,8 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1153,7 +1131,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid.
int delta = code_handle->address() - re_code->address();
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
index 063582c6485..921a84817c8 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
@@ -1,39 +1,16 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "mips/macro-assembler-mips.h"
+#include "src/mips/assembler-mips.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/macro-assembler.h"
+#include "src/code.h"
+#include "src/mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/simulator-mips.cc b/chromium/v8/src/mips/simulator-mips.cc
index acc65251e23..dfb1ee3f071 100644
--- a/chromium/v8/src/mips/simulator-mips.cc
+++ b/chromium/v8/src/mips/simulator-mips.cc
@@ -1,44 +1,22 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include <stdlib.h>
#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
#include <cmath>
-#include <cstdarg>
-#include "v8.h"
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "cpu.h"
-#include "disasm.h"
-#include "assembler.h"
-#include "globals.h" // Need the BitCast.
-#include "mips/constants-mips.h"
-#include "mips/simulator-mips.h"
+#include "src/cpu.h"
+#include "src/disasm.h"
+#include "src/assembler.h"
+#include "src/globals.h" // Need the BitCast.
+#include "src/mips/constants-mips.h"
+#include "src/mips/simulator-mips.h"
// Only build the simulator if not compiling for real MIPS hardware.
@@ -862,12 +840,12 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize));
} else {
// Cache miss. Load memory into the cache.
- OS::MemCopy(cached_line, line, CachePage::kLineLength);
+ memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -924,6 +902,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -971,6 +953,12 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static void* ReverseRedirection(int32_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
private:
void* external_function_;
uint32_t swi_instruction_;
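
ReverseRedirection works because the "function address" that generated code holds is really the address of the swi_instruction_ field inside a Redirection record; stepping back by that field's offset recovers the record and, from it, the real C++ entry point. A structural sketch of the idea (a simplified stand-in, not the class's full layout):

    #include <cstddef>
    #include <cstdint>

    struct RedirectionModel {
      void* external_function_;
      uint32_t swi_instruction_;

      // Map a register value (the address of swi_instruction_) back to the
      // record that owns it, then to the real external function.
      static RedirectionModel* FromSwiAddress(uintptr_t swi_addr) {
        return reinterpret_cast<RedirectionModel*>(
            swi_addr - offsetof(RedirectionModel, swi_instruction_));
      }
    };
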
@@ -1059,8 +1047,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(registers_[0])];
- OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
@@ -1108,14 +1096,14 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
// Registers a0 and a1 -> x.
reg_buffer[0] = get_register(a0);
reg_buffer[1] = get_register(a1);
- OS::MemCopy(x, buffer, sizeof(buffer));
+ memcpy(x, buffer, sizeof(buffer));
// Registers a2 and a3 -> y.
reg_buffer[0] = get_register(a2);
reg_buffer[1] = get_register(a3);
- OS::MemCopy(y, buffer, sizeof(buffer));
+ memcpy(y, buffer, sizeof(buffer));
// Register 2 -> z.
reg_buffer[0] = get_register(a2);
- OS::MemCopy(z, buffer, sizeof(*z));
+ memcpy(z, buffer, sizeof(*z));
}
}
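
GetFpArgs and SetFpResult implement the soft-float convention in which a double travels split across two GPRs and is reassembled bitwise on the C++ side. A minimal standalone model (assuming little-endian MIPS, where the low word rides in the first register of the pair):

    #include <cstdint>
    #include <cstring>

    static double DoubleFromRegisterPair(int32_t lo, int32_t hi) {
      int32_t buffer[2] = {lo, hi};
      double result;
      // memcpy is the well-defined way to type-pun: a bit copy, not a value
      // conversion, exactly like the register-to-buffer copies above.
      std::memcpy(&result, buffer, sizeof(result));
      return result;
    }
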
@@ -1127,7 +1115,7 @@ void Simulator::SetFpResult(const double& result) {
} else {
char buffer[2 * sizeof(registers_[0])];
int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- OS::MemCopy(buffer, &result, sizeof(buffer));
+ memcpy(buffer, &result, sizeof(buffer));
// Copy result to v0 and v1.
set_register(v0, reg_buffer[0]);
set_register(v1, reg_buffer[1]);
@@ -1388,12 +1376,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
+ int32_t arg0, int32_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -1554,7 +1542,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, arg1);
+ target(arg0, Redirection::ReverseRedirection(arg1));
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
@@ -1572,7 +1560,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- target(arg0, arg1, arg2);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -1718,12 +1706,12 @@ void Simulator::SignalExceptions() {
// Handle execution based on instruction types.
void Simulator::ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- int32_t& return_addr_reg,
- bool& do_interrupt) {
+ int32_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int32_t* next_pc,
+ int32_t* return_addr_reg,
+ bool* do_interrupt) {
// Every local variable declared here needs to be const.
// This is to make sure that changed values are sent back to
// DecodeTypeRegister correctly.
@@ -1752,10 +1740,10 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case CFC1:
// At the moment only FCSR is supported.
ASSERT(fs_reg == kFCSRRegister);
- alu_out = FCSR_;
+ *alu_out = FCSR_;
break;
case MFC1:
- alu_out = get_fpu_register(fs_reg);
+ *alu_out = get_fpu_register(fs_reg);
break;
case MFHC1:
UNIMPLEMENTED_MIPS();
@@ -1774,7 +1762,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
break;
default:
UNIMPLEMENTED_MIPS();
- };
+ }
break;
case COP1X:
break;
@@ -1782,56 +1770,56 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
switch (instr->FunctionFieldRaw()) {
case JR:
case JALR:
- next_pc = get_register(instr->RsValue());
- return_addr_reg = instr->RdValue();
+ *next_pc = get_register(instr->RsValue());
+ *return_addr_reg = instr->RdValue();
break;
case SLL:
- alu_out = rt << sa;
+ *alu_out = rt << sa;
break;
case SRL:
if (rs_reg == 0) {
// Regular logical right shift of a word by a fixed number of
// bits instruction. RS field is always equal to 0.
- alu_out = rt_u >> sa;
+ *alu_out = rt_u >> sa;
} else {
// Logical right-rotate of a word by a fixed number of bits. This
            // is a special case of the SRL instruction, added in MIPS32
            // Release 2. RS field is equal to 00001.
- alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ *alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
}
break;
case SRA:
- alu_out = rt >> sa;
+ *alu_out = rt >> sa;
break;
case SLLV:
- alu_out = rt << rs;
+ *alu_out = rt << rs;
break;
case SRLV:
if (sa == 0) {
// Regular logical right-shift of a word by a variable number of
// bits instruction. SA field is always equal to 0.
- alu_out = rt_u >> rs;
+ *alu_out = rt_u >> rs;
} else {
// Logical right-rotate of a word by a variable number of bits.
            // This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
}
break;
case SRAV:
- alu_out = rt >> rs;
+ *alu_out = rt >> rs;
break;
case MFHI:
- alu_out = get_register(HI);
+ *alu_out = get_register(HI);
break;
case MFLO:
- alu_out = get_register(LO);
+ *alu_out = get_register(LO);
break;
case MULT:
- i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
break;
case MULTU:
- u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
case ADD:
if (HaveSameSign(rs, rt)) {
@@ -1841,10 +1829,10 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
}
}
- alu_out = rs + rt;
+ *alu_out = rs + rt;
break;
case ADDU:
- alu_out = rs + rt;
+ *alu_out = rs + rt;
break;
case SUB:
if (!HaveSameSign(rs, rt)) {
@@ -1854,51 +1842,50 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
}
}
- alu_out = rs - rt;
+ *alu_out = rs - rt;
break;
case SUBU:
- alu_out = rs - rt;
+ *alu_out = rs - rt;
break;
case AND:
- alu_out = rs & rt;
+ *alu_out = rs & rt;
break;
case OR:
- alu_out = rs | rt;
+ *alu_out = rs | rt;
break;
case XOR:
- alu_out = rs ^ rt;
+ *alu_out = rs ^ rt;
break;
case NOR:
- alu_out = ~(rs | rt);
+ *alu_out = ~(rs | rt);
break;
case SLT:
- alu_out = rs < rt ? 1 : 0;
+ *alu_out = rs < rt ? 1 : 0;
break;
case SLTU:
- alu_out = rs_u < rt_u ? 1 : 0;
+ *alu_out = rs_u < rt_u ? 1 : 0;
break;
// Break and trap instructions.
case BREAK:
-
- do_interrupt = true;
+ *do_interrupt = true;
break;
case TGE:
- do_interrupt = rs >= rt;
+ *do_interrupt = rs >= rt;
break;
case TGEU:
- do_interrupt = rs_u >= rt_u;
+ *do_interrupt = rs_u >= rt_u;
break;
case TLT:
- do_interrupt = rs < rt;
+ *do_interrupt = rs < rt;
break;
case TLTU:
- do_interrupt = rs_u < rt_u;
+ *do_interrupt = rs_u < rt_u;
break;
case TEQ:
- do_interrupt = rs == rt;
+ *do_interrupt = rs == rt;
break;
case TNE:
- do_interrupt = rs != rt;
+ *do_interrupt = rs != rt;
break;
case MOVN:
case MOVZ:
@@ -1911,19 +1898,23 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
- alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ *alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
case CLZ:
- alu_out = __builtin_clz(rs_u);
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ *alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
@@ -1934,7 +1925,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
uint16_t lsb = sa;
uint16_t size = msb - lsb + 1;
uint32_t mask = (1 << size) - 1;
- alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
break;
}
case EXT: { // Mips32r2 instruction.
@@ -1944,16 +1935,16 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
uint16_t lsb = sa;
uint16_t size = msb + 1;
uint32_t mask = (1 << size) - 1;
- alu_out = (rs_u & (mask << lsb)) >> lsb;
+ *alu_out = (rs_u & (mask << lsb)) >> lsb;
break;
}
default:
UNREACHABLE();
- };
+ }
break;
default:
UNREACHABLE();
- };
+ }
}
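
The CLZ change in the hunk above is worth calling out: the hardware instruction is defined for a zero input, while the GCC intrinsic is not, so the zero case has to be handled before the intrinsic is reached. The same guard as a standalone helper (a sketch of the pattern, assuming a GCC/Clang toolchain; not the CompilerIntrinsics implementation):

    #include <cstdint>

    // MIPS32 CLZ yields 32 for a zero input; __builtin_clz(0) is undefined
    // behavior, so special-case zero first.
    static inline int CountLeadingZeros32(uint32_t x) {
      return x == 0 ? 32 : __builtin_clz(x);
    }
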
@@ -1992,12 +1983,12 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// Set up the variables if needed before executing the instruction.
ConfigureTypeRegister(instr,
- alu_out,
- i64hilo,
- u64hilo,
- next_pc,
- return_addr_reg,
- do_interrupt);
+ &alu_out,
+ &i64hilo,
+ &u64hilo,
+ &next_pc,
+ &return_addr_reg,
+ &do_interrupt);
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -2115,7 +2106,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// In rounding mode 0 it should behave like ROUND.
case ROUND_W_D: // Round double to word (round half to even).
{
- double rounded = floor(fs + 0.5);
+ double rounded = std::floor(fs + 0.5);
int32_t result = static_cast<int32_t>(rounded);
if ((result & 1) != 0 && result - fs == 0.5) {
// If the number is halfway between two integers,
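
The ROUND_W_D case synthesizes round-to-nearest-even from std::floor; the hunk cuts off just before the adjustment, but the shape of the computation is the following (a sketch assuming the elided tail decrements the odd halfway result, per the comment above):

    #include <cmath>
    #include <cstdint>

    static int32_t RoundHalfToEvenModel(double fs) {
      double rounded = std::floor(fs + 0.5);  // rounds ties upward
      int32_t result = static_cast<int32_t>(rounded);
      if ((result & 1) != 0 && result - fs == 0.5) {
        result--;  // halfway between integers: pick the even one, e.g. 2.5 -> 2
      }
      return result;
    }
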
@@ -2140,7 +2131,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case FLOOR_W_D: // Round double to word towards negative infinity.
{
- double rounded = floor(fs);
+ double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2150,7 +2141,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case CEIL_W_D: // Round double to word towards positive infinity.
{
- double rounded = ceil(fs);
+ double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, rounded)) {
@@ -2176,19 +2167,20 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
- double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ double rounded =
+ fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
}
case FLOOR_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(floor(fs));
+ i64 = static_cast<int64_t>(std::floor(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
case CEIL_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(ceil(fs));
+ i64 = static_cast<int64_t>(std::ceil(fs));
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
@@ -2211,7 +2203,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case L:
switch (instr->FunctionFieldRaw()) {
@@ -2233,7 +2225,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case COP1X:
switch (instr->FunctionFieldRaw()) {
@@ -2246,7 +2238,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@@ -2327,7 +2319,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
@@ -2353,14 +2345,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
// cases.
default:
set_register(rd_reg, alu_out);
- };
+ }
}
@@ -2421,7 +2413,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
break;
// ------------- REGIMM class.
case REGIMM:
@@ -2440,7 +2432,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
switch (instr->RtFieldRaw()) {
case BLTZ:
case BLTZAL:
@@ -2459,7 +2451,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
default:
break;
- };
+ }
break; // case REGIMM.
// ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
@@ -2592,7 +2584,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- };
+ }
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -2668,7 +2660,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
default:
break;
- };
+ }
if (execute_branch_delay_instruction) {
@@ -2894,9 +2886,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) {
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- OS::MemCopy(buffer, &d0, sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
set_dw_register(a0, buffer);
- OS::MemCopy(buffer, &d1, sizeof(d1));
+ memcpy(buffer, &d1, sizeof(d1));
set_dw_register(a2, buffer);
}
CallInternal(entry);
diff --git a/chromium/v8/src/mips/simulator-mips.h b/chromium/v8/src/mips/simulator-mips.h
index d9fd10f245c..20dde25b4c1 100644
--- a/chromium/v8/src/mips/simulator-mips.h
+++ b/chromium/v8/src/mips/simulator-mips.h
@@ -1,29 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// Declares a Simulator for MIPS instructions if we are not generating a native
@@ -36,8 +13,8 @@
#ifndef V8_MIPS_SIMULATOR_MIPS_H_
#define V8_MIPS_SIMULATOR_MIPS_H_
-#include "allocation.h"
-#include "constants-mips.h"
+#include "src/allocation.h"
+#include "src/mips/constants-mips.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native mips platform.
@@ -61,9 +38,6 @@ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
@@ -96,8 +70,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
-#include "hashmap.h"
-#include "assembler.h"
+#include "src/hashmap.h"
+#include "src/assembler.h"
namespace v8 {
namespace internal {
@@ -203,6 +177,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
@@ -285,12 +263,12 @@ class Simulator {
// Helper function for DecodeTypeRegister.
void ConfigureTypeRegister(Instruction* instr,
- int32_t& alu_out,
- int64_t& i64hilo,
- uint64_t& u64hilo,
- int32_t& next_pc,
- int32_t& return_addr_reg,
- bool& do_interrupt);
+ int32_t* alu_out,
+ int64_t* i64hilo,
+ uint64_t* u64hilo,
+ int32_t* next_pc,
+ int32_t* return_addr_reg,
+ bool* do_interrupt);
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@@ -409,10 +387,6 @@ class Simulator {
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
diff --git a/chromium/v8/src/mips/stub-cache-mips.cc b/chromium/v8/src/mips/stub-cache-mips.cc
index 9f5089d55d9..13e7e4bde9c 100644
--- a/chromium/v8/src/mips/stub-cache-mips.cc
+++ b/chromium/v8/src/mips/stub-cache-mips.cc
@@ -1,37 +1,14 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -287,15 +264,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ lw(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ASSERT(!prototype.is(at));
- __ li(at, isolate->global_object());
- __ Branch(miss, ne, prototype, Operand(at));
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ lw(scratch, MemOperand(cp, offset));
+ __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ li(at, function);
+ __ Branch(miss, ne, at, Operand(scratch));
+
// Load its initial map. The global functions all have initial maps.
__ li(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -309,7 +290,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -338,61 +319,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi, t0);
-
- // Check that the object is a string.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ And(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ Branch(non_string_object,
- ne,
- scratch2,
- Operand(static_cast<int32_t>(kStringTag)));
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
-
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -463,11 +389,29 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ Handle<Map> current;
+ if (!it.Done()) {
+ __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
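
The class-check loop added for HeapObject fields above emits one map comparison per class in the field's HeapType: each map but the last branches forward to do_store on equality, and the last branches to miss on inequality. The equivalent structured test (a host-side sketch, not V8 code):

    // The value's map must be one of the maps permitted by the field type;
    // returning false corresponds to the final branch to miss_label.
    static bool ValueMapPermitted(const void* value_map,
                                  const void* const* classes, int count) {
      for (int i = 0; i < count; i++) {
        if (value_map == classes[i]) return true;  // jump to do_store
      }
      return false;
    }
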
@@ -541,15 +485,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -567,15 +511,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -617,29 +561,40 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ Handle<Map> current;
+ while (true) {
+ // Do the CompareMap() directly within the Branch() functions.
+ current = it.Current();
+ it.Advance();
+ if (it.Done()) {
+ __ Branch(miss_label, ne, scratch1, Operand(current));
+ break;
+ }
+ __ Branch(&do_store, eq, scratch1, Operand(current));
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ lw(scratch1, FieldMemOperand(receiver_reg, index.offset()));
} else {
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ lw(scratch1, FieldMemOperand(scratch1, offset));
+ __ lw(scratch1, FieldMemOperand(scratch1, index.offset()));
}
// Store the value into the storage.
@@ -667,12 +622,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ __ sw(value_reg, FieldMemOperand(receiver_reg, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -680,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- offset,
+ index.offset(),
name_reg,
scratch1,
kRAHasNotBeenSaved,
@@ -690,13 +644,12 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array.
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ __ sw(value_reg, FieldMemOperand(scratch1, index.offset()));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -704,7 +657,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
- offset,
+ index.offset(),
name_reg,
receiver_reg,
kRAHasNotBeenSaved,
@@ -765,92 +718,71 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- ASSERT(Smi::FromInt(0) == 0);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(zero_reg);
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!receiver.is(scratch_in));
+  // Prepare to push the receiver and arguments: adjust sp first.
+ __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
+ __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver.
+  // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch_in.is(arg));
+ __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg.
}
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
+ ASSERT(optimization.is_simple_api_call());
+  // ABI for CallApiFunctionStub.
+ Register callee = a0;
+ Register call_data = t0;
+ Register holder = a2;
+ Register api_function_address = a1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ li(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
- // : holder (set by CheckPrototypes)
- // -- sp[28] : last JS argument
- // -- ...
- // -- sp[(argc + 6) * 4] : first JS argument
- // -- sp[(argc + 7) * 4] : receiver
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
- // Save calling context.
- __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
- // Get the function and setup the context.
+ Isolate* isolate = masm->isolate();
Handle<JSFunction> function = optimization.constant_function();
- __ li(t1, function);
- __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
- __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
-
- // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ li(a0, api_call_info);
- __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ li(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ li(call_data, api_call_info);
+ __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ li(t2, call_data);
+ __ li(call_data, call_data_obj);
}
- // Store call data.
- __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
- // Store isolate.
- __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
- // Store ReturnValue default and ReturnValue.
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
- __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
-
- // Prepare arguments.
- __ Move(a2, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // a0 = FunctionCallbackInfo&
- // Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(a0, sp, kPointerSize);
- // FunctionCallbackInfo::implicit_args_
- __ sw(a2, MemOperand(a0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
- __ sw(t0, MemOperand(a0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(t0, Operand(argc));
- __ sw(t0, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
@@ -858,249 +790,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(
- fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
-
- __ CallApiFunctionAndReturn(ref,
- function_address,
- thunk_ref,
- a1,
- kStackUnwindSpace,
- return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
-}
+ __ li(api_function_address, Operand(ref));
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- typedef FunctionCallbackArguments FCA;
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Assign stack space for the call arguments.
- __ Subu(sp, sp, Operand(stack_space * kPointerSize));
- // Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- // Write receiver to stack frame.
- int index = stack_space - 1;
- __ sw(receiver, MemOperand(sp, index * kPointerSize));
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
- }
-
- GenerateFastApiDirectCall(masm, optimization, argc, true);
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
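// After the stores at the top of GenerateFastApiCall, the stack holds the
// receiver in the highest reserved slot and the values below it:
//
//   sp[argc * kPointerSize]       : receiver
//   sp[(argc - 1) * kPointerSize] : values[argc - 1]
//   ...
//   sp[0 * kPointerSize]          : values[0]
//
// CallApiFunctionStub is then expected to build the FunctionCallbackInfo that
// the deleted GenerateFastApiDirectCall assembled by hand; that is an
// assumption based on the register ABI above (callee in a0, call data in t0,
// holder in a2, function address in a1). A hedged sketch of a store-callback
// call site, mirroring the arguments of the removed overload (the names here
// are illustrative only):
//
//   Register values[] = { value() };
//   GenerateFastApiCall(masm(), call_optimization, receiver_map,
//                       receiver(), scratch3(), true /* is_store */,
//                       1, values);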
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(
- masm, optimization, arguments_.immediate(), false);
- } else {
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(a0, receiver);
- stub_compiler_->GenerateJumpFunction(object, function);
- }
-
- // Deferred code for fast API call case---clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ Branch(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- __ Push(receiver, holder, name_);
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- ExtraICState extra_ic_state_;
-};
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1110,20 +807,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(receiver_map));
  // Make sure there's no overlap between the scratch register and the
  // object and holder registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1134,13 +827,10 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- typedef FunctionCallbackArguments FCA;
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1163,7 +853,7 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1204,10 +894,6 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
}
}
- if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
@@ -1256,7 +942,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1302,19 +988,15 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
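// The two stubs above now take a FieldIndex instead of the old
// (is_inobject, translated index, representation) triple. A hedged sketch of
// the load they encode, assuming the FieldIndex accessors already used by
// GenerateStoreField earlier in this file:
//
//   if (field.is_inobject()) {
//     __ lw(result, FieldMemOperand(receiver, field.offset()));
//   } else {
//     __ lw(result, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
//     __ lw(result, FieldMemOperand(result, field.offset()));
//   }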
@@ -1327,13 +1009,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1369,37 +1044,17 @@ void LoadStubCompiler::GenerateLoadCallback(
__ Addu(scratch2(), sp, 1 * kPointerSize);
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(a0, sp); // (first argument - a0) = Handle<Name>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ sw(a2, MemOperand(sp, kPointerSize));
- // (second argument - a1) = AccessorInfo&
- __ Addu(a1, sp, kPointerSize);
+  // ABI for CallApiGetterStub.
+ Register getter_address_reg = a2;
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ li(getter_address_reg, Operand(ref));
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- isolate());
- __ CallApiFunctionAndReturn(ref,
- getter_address,
- thunk_ref,
- a2,
- kStackUnwindSpace,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
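// The hand-built exit frame, the on-stack PropertyAccessorInfo, and the
// profiling thunk are gone from this path; the stub now only loads the
// getter's entry point and tail-calls CallApiGetterStub, which is assumed to
// build the exit frame and return the result in v0 itself. Per the ABI
// comment above, the remaining contract is that a2 holds the getter address
// and the AccessorInfo::args_ list plus the property name are already on the
// stack.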
@@ -1482,1049 +1137,25 @@ void LoadStubCompiler::GenerateLoadInterceptor(
this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Branch(miss, ne, a2, Operand(name));
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ GetObjectType(function, scratch, scratch);
- __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ li(a3, Operand(cell));
- __ lw(a1, FieldMemOperand(a3, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(a1, a3, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Branch(miss, ne, t0, Operand(a3));
- } else {
- __ Branch(miss, ne, a1, Operand(function));
- }
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, a1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ li(a0, Operand(argc));
- __ li(a2, Operand(site_feedback_cell));
- __ li(a1, Operand(function));
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- Register receiver = a0;
- Register scratch = a1;
-
- const int argc = arguments().immediate();
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ DropAndRet(argc + 1);
- } else {
- Label call_builtin;
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = t2;
- Register end_elements = t1;
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
-
- // Check if value is a smi.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(t0, &with_write_barrier);
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- // Check for a smi.
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into scratch and calculate new length.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, scratch, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, scratch, elements, a3, t1, a2,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&with_write_barrier);
-
- __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(a3, t3, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
- // edx: receiver
- // a3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- a3,
- t3,
- &try_holey_map);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- a3,
- t3,
- &call_builtin);
- __ mov(a2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(a3, a3, &call_builtin);
- }
-
- // Save new length.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- __ RecordWrite(elements,
- end_elements,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&attempt_to_grow_elements);
- // scratch: array's length + 1.
- // t0: elements' length.
-
- if (!FLAG_inline_new) {
- __ Branch(&call_builtin);
- }
-
- __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(a2, &no_fast_elements_check);
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(t3, t3, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
- __ li(t3, Operand(new_space_allocation_top));
- __ lw(a3, MemOperand(t3));
- __ Branch(&call_builtin, ne, end_elements, Operand(a3));
-
- __ li(t5, Operand(new_space_allocation_limit));
- __ lw(t5, MemOperand(t5));
- __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
- __ Branch(&call_builtin, hi, a3, Operand(t5));
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ sw(a3, MemOperand(t3));
- // Push the argument.
- __ sw(a2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ sw(a3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
- __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
- Register receiver = a0;
- Register scratch = a1;
- Register elements = a3;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into t0 and calculate new length.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
-
- // Get the last element.
- __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(elements, elements, t1);
- __ lw(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&call_builtin, eq, scratch, Operand(t2));
-
- // Set the array's length.
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
- const int argc = arguments().immediate();
- __ mov(v0, scratch);
- __ DropAndRet(argc + 1);
-
- __ bind(&return_undefined);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ DropAndRet(argc + 1);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
-
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = a0;
- Register index = t1;
- Register result = a1;
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ mov(v0, result);
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kNanValueRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = a0;
- Register index = t1;
- Register scratch = a3;
- Register result = a1;
- if (argc > 0) {
- __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ mov(v0, result);
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ DropAndRet(argc + 1);
- }
-
- __ bind(&miss);
- // Restore function name in a2.
- __ li(a2, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = a1;
- __ lw(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ And(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, v0);
- generator.GenerateFast(masm());
- __ DropAndRet(argc + 1);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTst(v0, t0);
- __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
-
- // If fpu is enabled, we use the floor instruction.
-
- // Load the HeapNumber value.
- __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- // Backup FCSR.
- __ cfc1(a3, FCSR);
- // Clearing FCSR clears the exception mask with no side-effects.
- __ ctc1(zero_reg, FCSR);
- // Convert the argument to an integer.
- __ floor_w_d(f0, f0);
-
- // Start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
- __ And(t2, t1, Operand(~HeapNumber::kSignMask));
- __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
-
- // Retrieve FCSR and check for fpu errors.
- __ cfc1(t5, FCSR);
- __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
- __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- // If greater or equal, the argument is already round and in v0.
- __ Branch(&restore_fcsr_and_return, ge, t3,
- Operand(HeapNumber::kMantissaBits));
- __ Branch(&wont_fit_smi);
-
- __ bind(&no_fpu_error);
- // Move the result back to v0.
- __ mfc1(v0, f0);
- // Check if the result fits into a smi.
- __ Addu(a1, v0, Operand(0x40000000));
- __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ sll(v0, v0, kSmiTagSize);
-
- // Check for -0.
- __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
- // t1 already holds the HeapNumber exponent.
- __ And(t0, t1, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative it was -0, so load its address and return.
- // Else v0 is loaded with 0, so we can also just return.
- __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- __ bind(&restore_fcsr_and_return);
- // Restore FCSR and return.
- __ ctc1(a3, FCSR);
-
- __ DropAndRet(argc + 1);
-
- __ bind(&wont_fit_smi);
- // Restore FCSR and fall to slow case.
- __ ctc1(a3, FCSR);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into v0.
- __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(v0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ sra(t0, v0, kBitsPerInt - 1);
- __ Xor(a1, v0, t0);
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ Subu(v0, a1, t0);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ Branch(&slow, lt, v0, Operand(zero_reg));
-
- // Smi case done.
- __ DropAndRet(argc + 1);
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ And(t0, a1, Operand(HeapNumber::kSignMask));
- __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
- __ DropAndRet(argc + 1);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
- __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
- __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ DropAndRet(argc + 1);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
-
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, a0, a3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
-
- ReserveSpaceForFastApiCall(masm(), a0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- a1, holder, a0, a3, t0, name, depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc, false);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&success, eq, object, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(miss, ne, object, Operand(at));
- __ bind(&success);
-}
-
-
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, receiver_offset));
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- GenerateNameCheck(name, miss);
-
- Register reg = a0;
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- const int receiver_offset = argc * kPointerSize;
- __ lw(a0, MemOperand(sp, receiver_offset));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(a0, miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, a1, a3);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(
- IC::CurrentTypeOf(object, isolate()),
- reg, holder, a1, a3, t0, name, miss);
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ GetObjectType(reg, a3, a3);
- __ Branch(miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a1, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ GetObjectType(reg, a1, a3);
- __ Branch(miss, ne, a3, Operand(SYMBOL_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, a1, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ GetObjectType(reg, a3, a3);
- __ Branch(miss, ne, a3, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a1, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a1, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- a1, holder, a1, a3, t0, name, miss);
- }
-
- return reg;
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- ASSERT(function.is(a1));
- // Check that the function really is a function.
- GenerateFunctionCheck(function, a3, miss);
- PatchGlobalProxy(object);
- // Invoke the function.
- __ InvokeFunction(a1, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
- &miss);
-
- // Move returned value, the function to call, to a1.
- __ mov(a1, v0);
- // Restore receiver.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- GenerateJumpFunction(object, a1, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Potentially loads a closure that matches the shared function info of the
- // function, rather than function.
- GenerateLoadFunctionFromCell(cell, function, &miss);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- GenerateJumpFunction(object, a1, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
// Stub never generated for non-global objects that require access
// checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- __ push(receiver()); // Receiver.
+  __ Push(receiver(), holder_reg);  // Receiver, holder.
__ li(at, Operand(callback)); // Callback info.
__ push(at);
__ li(at, Operand(name));
@@ -2533,24 +1164,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2563,27 +1177,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(a0);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(a1);
- __ push(a0);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2607,21 +1225,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -2629,16 +1232,12 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2666,31 +1265,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a1, a2, a0, a3, t0, t1 };
- return registers;
+Register StoreStubCompiler::value() {
+ return a0;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a3, t0, t1 };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Branch(miss, ne, name_reg, Operand(name));
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a3, t0, t1 };
+ return registers;
}
@@ -2699,6 +1289,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2711,11 +1302,17 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ lw(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
__ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2734,7 +1331,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -2753,13 +1350,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2773,8 +1370,9 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Branch(&miss, ne, this->name(), Operand(name));
}
Label number_case;
@@ -2788,14 +1386,14 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match.
// Separate compare from branch, to provide path for above JumpIfSmi().
__ Subu(match, map_reg, Operand(map));
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
@@ -2815,6 +1413,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
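// A sketch of the dispatch CompilePolymorphicIC emits for types T1..Tk with
// handlers H1..Hk (illustrative names): the receiver's map is loaded once,
// each non-deprecated map is compared by subtracting it from map_reg, and a
// zero difference presumably feeds a conditional jump to that handler; the
// jump itself falls outside this hunk, so that last step is an assumption:
//
//   map_reg = receiver->map;
//   if (map_reg - Map(T1) == 0) goto H1;
//   ...
//   if (map_reg - Map(Tk) == 0) goto Hk;
//   goto miss;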
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
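// A sketch of what the runtime entry above receives (Push stores its first
// operand at the higher address):
//
//   sp[1 * kPointerSize] : receiver (the array)
//   sp[0 * kPointerSize] : value    (the new length)
//
// The trailing (2, 1) passed to TailCallExternalReference are the runtime
// argument count and the expected result size.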
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,